   1// SPDX-License-Identifier: GPL-2.0
   2/* smp.c: Sparc64 SMP support.
   3 *
   4 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
   5 */
   6
   7#include <linux/export.h>
   8#include <linux/kernel.h>
   9#include <linux/sched/mm.h>
  10#include <linux/sched/hotplug.h>
  11#include <linux/mm.h>
  12#include <linux/pagemap.h>
  13#include <linux/threads.h>
  14#include <linux/smp.h>
  15#include <linux/interrupt.h>
  16#include <linux/kernel_stat.h>
  17#include <linux/delay.h>
  18#include <linux/init.h>
  19#include <linux/spinlock.h>
  20#include <linux/fs.h>
  21#include <linux/seq_file.h>
  22#include <linux/cache.h>
  23#include <linux/jiffies.h>
  24#include <linux/profile.h>
  25#include <linux/bootmem.h>
  26#include <linux/vmalloc.h>
  27#include <linux/ftrace.h>
  28#include <linux/cpu.h>
  29#include <linux/slab.h>
  30#include <linux/kgdb.h>
  31
  32#include <asm/head.h>
  33#include <asm/ptrace.h>
  34#include <linux/atomic.h>
  35#include <asm/tlbflush.h>
  36#include <asm/mmu_context.h>
  37#include <asm/cpudata.h>
  38#include <asm/hvtramp.h>
  39#include <asm/io.h>
  40#include <asm/timer.h>
  41#include <asm/setup.h>
  42
  43#include <asm/irq.h>
  44#include <asm/irq_regs.h>
  45#include <asm/page.h>
  46#include <asm/pgtable.h>
  47#include <asm/oplib.h>
  48#include <linux/uaccess.h>
  49#include <asm/starfire.h>
  50#include <asm/tlb.h>
  51#include <asm/sections.h>
  52#include <asm/prom.h>
  53#include <asm/mdesc.h>
  54#include <asm/ldc.h>
  55#include <asm/hypervisor.h>
  56#include <asm/pcr.h>
  57
  58#include "cpumap.h"
  59#include "kernel.h"
  60
  61DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
  62cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
  63	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
  64
  65cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
  66	[0 ... NR_CPUS-1] = CPU_MASK_NONE };
  67
  68cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
  69	[0 ... NR_CPUS - 1] = CPU_MASK_NONE };
  70
  71EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  72EXPORT_SYMBOL(cpu_core_map);
  73EXPORT_SYMBOL(cpu_core_sib_map);
  74EXPORT_SYMBOL(cpu_core_sib_cache_map);
  75
  76static cpumask_t smp_commenced_mask;
  77
  78static DEFINE_PER_CPU(bool, poke);
  79static bool cpu_poke;
  80
  81void smp_info(struct seq_file *m)
  82{
  83	int i;
  84	
  85	seq_printf(m, "State:\n");
  86	for_each_online_cpu(i)
  87		seq_printf(m, "CPU%d:\t\tonline\n", i);
  88}
  89
  90void smp_bogo(struct seq_file *m)
  91{
  92	int i;
  93	
  94	for_each_online_cpu(i)
  95		seq_printf(m,
  96			   "Cpu%dClkTck\t: %016lx\n",
  97			   i, cpu_data(i).clock_tick);
  98}
  99
 100extern void setup_sparc64_timer(void);
 101
 102static volatile unsigned long callin_flag = 0;
 103
 104void smp_callin(void)
 105{
 106	int cpuid = hard_smp_processor_id();
 107
 108	__local_per_cpu_offset = __per_cpu_offset(cpuid);
 109
 110	if (tlb_type == hypervisor)
 111		sun4v_ktsb_register();
 112
 113	__flush_tlb_all();
 114
 115	setup_sparc64_timer();
 116
 117	if (cheetah_pcache_forced_on)
 118		cheetah_enable_pcache();
 119
 120	callin_flag = 1;
 121	__asm__ __volatile__("membar #Sync\n\t"
 122			     "flush  %%g6" : : : "memory");
 123
 124	/* Clear this or we will die instantly when we
 125	 * schedule back to this idler...
 126	 */
 127	current_thread_info()->new_child = 0;
 128
 129	/* Attach to the address space of init_task. */
 130	mmgrab(&init_mm);
 131	current->active_mm = &init_mm;
 132
 133	/* inform the notifiers about the new cpu */
 134	notify_cpu_starting(cpuid);
 135
 136	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
 137		rmb();
 138
 139	set_cpu_online(cpuid, true);
 140
 141	/* idle thread is expected to have preempt disabled */
 142	preempt_disable();
 143
 144	local_irq_enable();
 145
 146	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 147}
 148
 149void cpu_panic(void)
 150{
 151	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
 152	panic("SMP bolixed\n");
 153}
 154
 155/* This tick register synchronization scheme is taken entirely from
 156 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 157 *
 158 * The only change I've made is to rework it so that the master
  159 * initiates the synchronization instead of the slave. -DaveM
 160 */
 161
 162#define MASTER	0
 163#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))
 164
 165#define NUM_ROUNDS	64	/* magic value */
 166#define NUM_ITERS	5	/* likewise */
 167
 168static DEFINE_RAW_SPINLOCK(itc_sync_lock);
 169static unsigned long go[SLAVE + 1];
 170
 171#define DEBUG_TICK_SYNC	0
 172
 173static inline long get_delta (long *rt, long *master)
 174{
 175	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
 176	unsigned long tcenter, t0, t1, tm;
 177	unsigned long i;
 178
 179	for (i = 0; i < NUM_ITERS; i++) {
 180		t0 = tick_ops->get_tick();
 181		go[MASTER] = 1;
 182		membar_safe("#StoreLoad");
 183		while (!(tm = go[SLAVE]))
 184			rmb();
 185		go[SLAVE] = 0;
 186		wmb();
 187		t1 = tick_ops->get_tick();
 188
 189		if (t1 - t0 < best_t1 - best_t0)
 190			best_t0 = t0, best_t1 = t1, best_tm = tm;
 191	}
 192
 193	*rt = best_t1 - best_t0;
 194	*master = best_tm - best_t0;
 195
 196	/* average best_t0 and best_t1 without overflow: */
 197	tcenter = (best_t0/2 + best_t1/2);
 198	if (best_t0 % 2 + best_t1 % 2 == 2)
 199		tcenter++;
 200	return tcenter - best_tm;
 201}
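/* Editor's note: a small worked example of the overflow-safe averaging
 * above (illustrative values, not from the original source).  With
 * best_t0 = 7, best_t1 = 11 and best_tm = 9, tcenter = 7/2 + 11/2 = 3 + 5
 * = 8, plus 1 because both halves dropped a remainder, so tcenter = 9 and
 * get_delta() returns tcenter - best_tm = 0, i.e. the slave's tick midpoint
 * already coincides with the master's timestamp.
 */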
 202
 203void smp_synchronize_tick_client(void)
 204{
 205	long i, delta, adj, adjust_latency = 0, done = 0;
 206	unsigned long flags, rt, master_time_stamp;
 207#if DEBUG_TICK_SYNC
 208	struct {
 209		long rt;	/* roundtrip time */
 210		long master;	/* master's timestamp */
 211		long diff;	/* difference between midpoint and master's timestamp */
 212		long lat;	/* estimate of itc adjustment latency */
 213	} t[NUM_ROUNDS];
 214#endif
 215
 216	go[MASTER] = 1;
 217
 218	while (go[MASTER])
 219		rmb();
 220
 221	local_irq_save(flags);
 222	{
 223		for (i = 0; i < NUM_ROUNDS; i++) {
 224			delta = get_delta(&rt, &master_time_stamp);
 225			if (delta == 0)
 226				done = 1;	/* let's lock on to this... */
 227
 228			if (!done) {
 229				if (i > 0) {
 230					adjust_latency += -delta;
 231					adj = -delta + adjust_latency/4;
 232				} else
 233					adj = -delta;
 234
 235				tick_ops->add_tick(adj);
 236			}
 237#if DEBUG_TICK_SYNC
 238			t[i].rt = rt;
 239			t[i].master = master_time_stamp;
 240			t[i].diff = delta;
 241			t[i].lat = adjust_latency/4;
 242#endif
 243		}
 244	}
 245	local_irq_restore(flags);
 246
 247#if DEBUG_TICK_SYNC
 248	for (i = 0; i < NUM_ROUNDS; i++)
 249		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
 250		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
 251#endif
 252
 253	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
 254	       "(last diff %ld cycles, maxerr %lu cycles)\n",
 255	       smp_processor_id(), delta, rt);
 256}
 257
 258static void smp_start_sync_tick_client(int cpu);
 259
 260static void smp_synchronize_one_tick(int cpu)
 261{
 262	unsigned long flags, i;
 263
 264	go[MASTER] = 0;
 265
 266	smp_start_sync_tick_client(cpu);
 267
 268	/* wait for client to be ready */
 269	while (!go[MASTER])
 270		rmb();
 271
 272	/* now let the client proceed into his loop */
 273	go[MASTER] = 0;
 274	membar_safe("#StoreLoad");
 275
 276	raw_spin_lock_irqsave(&itc_sync_lock, flags);
 277	{
 278		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
 279			while (!go[MASTER])
 280				rmb();
 281			go[MASTER] = 0;
 282			wmb();
 283			go[SLAVE] = tick_ops->get_tick();
 284			membar_safe("#StoreLoad");
 285		}
 286	}
 287	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
 288}
 289
 290#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
 291static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
 292				void **descrp)
 293{
 294	extern unsigned long sparc64_ttable_tl0;
 295	extern unsigned long kern_locked_tte_data;
 296	struct hvtramp_descr *hdesc;
 297	unsigned long trampoline_ra;
 298	struct trap_per_cpu *tb;
 299	u64 tte_vaddr, tte_data;
 300	unsigned long hv_err;
 301	int i;
 302
 303	hdesc = kzalloc(sizeof(*hdesc) +
 304			(sizeof(struct hvtramp_mapping) *
 305			 num_kernel_image_mappings - 1),
 306			GFP_KERNEL);
 307	if (!hdesc) {
 308		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
 309		       "hvtramp_descr.\n");
 310		return;
 311	}
 312	*descrp = hdesc;
 313
 314	hdesc->cpu = cpu;
 315	hdesc->num_mappings = num_kernel_image_mappings;
 316
 317	tb = &trap_block[cpu];
 318
 319	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
 320	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
 321
 322	hdesc->thread_reg = thread_reg;
 323
 324	tte_vaddr = (unsigned long) KERNBASE;
 325	tte_data = kern_locked_tte_data;
 326
 327	for (i = 0; i < hdesc->num_mappings; i++) {
 328		hdesc->maps[i].vaddr = tte_vaddr;
 329		hdesc->maps[i].tte   = tte_data;
 330		tte_vaddr += 0x400000;
 331		tte_data  += 0x400000;
 332	}
 333
 334	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
 335
 336	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
 337				 kimage_addr_to_ra(&sparc64_ttable_tl0),
 338				 __pa(hdesc));
 339	if (hv_err)
 340		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
 341		       "gives error %lu\n", hv_err);
 342}
 343#endif
 344
 345extern unsigned long sparc64_cpu_startup;
 346
 347/* The OBP cpu startup callback truncates the 3rd arg cookie to
 348 * 32-bits (I think) so to be safe we have it read the pointer
 349 * contained here so we work on >4GB machines. -DaveM
 350 */
 351static struct thread_info *cpu_new_thread = NULL;
 352
 353static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
 354{
 355	unsigned long entry =
 356		(unsigned long)(&sparc64_cpu_startup);
 357	unsigned long cookie =
 358		(unsigned long)(&cpu_new_thread);
 359	void *descr = NULL;
 360	int timeout, ret;
 361
 362	callin_flag = 0;
 363	cpu_new_thread = task_thread_info(idle);
 364
 365	if (tlb_type == hypervisor) {
 366#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
 367		if (ldom_domaining_enabled)
 368			ldom_startcpu_cpuid(cpu,
 369					    (unsigned long) cpu_new_thread,
 370					    &descr);
 371		else
 372#endif
 373			prom_startcpu_cpuid(cpu, entry, cookie);
 374	} else {
 375		struct device_node *dp = of_find_node_by_cpuid(cpu);
 376
 377		prom_startcpu(dp->phandle, entry, cookie);
 378	}
 379
 380	for (timeout = 0; timeout < 50000; timeout++) {
 381		if (callin_flag)
 382			break;
 383		udelay(100);
 384	}
 385
 386	if (callin_flag) {
 387		ret = 0;
 388	} else {
 389		printk("Processor %d is stuck.\n", cpu);
 390		ret = -ENODEV;
 391	}
 392	cpu_new_thread = NULL;
 393
 394	kfree(descr);
 395
 396	return ret;
 397}
 398
 399static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
 400{
 401	u64 result, target;
 402	int stuck, tmp;
 403
 404	if (this_is_starfire) {
 405		/* map to real upaid */
 406		cpu = (((cpu & 0x3c) << 1) |
 407			((cpu & 0x40) >> 4) |
 408			(cpu & 0x3));
 409	}
 410
 411	target = (cpu << 14) | 0x70;
 412again:
 413	/* Ok, this is the real Spitfire Errata #54.
 414	 * One must read back from a UDB internal register
 415	 * after writes to the UDB interrupt dispatch, but
 416	 * before the membar Sync for that write.
 417	 * So we use the high UDB control register (ASI 0x7f,
 418	 * ADDR 0x20) for the dummy read. -DaveM
 419	 */
 420	tmp = 0x40;
 421	__asm__ __volatile__(
 422	"wrpr	%1, %2, %%pstate\n\t"
 423	"stxa	%4, [%0] %3\n\t"
 424	"stxa	%5, [%0+%8] %3\n\t"
 425	"add	%0, %8, %0\n\t"
 426	"stxa	%6, [%0+%8] %3\n\t"
 427	"membar	#Sync\n\t"
 428	"stxa	%%g0, [%7] %3\n\t"
 429	"membar	#Sync\n\t"
 430	"mov	0x20, %%g1\n\t"
 431	"ldxa	[%%g1] 0x7f, %%g0\n\t"
 432	"membar	#Sync"
 433	: "=r" (tmp)
 434	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
 435	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
 436	  "r" (0x10), "0" (tmp)
 437        : "g1");
 438
 439	/* NOTE: PSTATE_IE is still clear. */
 440	stuck = 100000;
 441	do {
 442		__asm__ __volatile__("ldxa [%%g0] %1, %0"
 443			: "=r" (result)
 444			: "i" (ASI_INTR_DISPATCH_STAT));
 445		if (result == 0) {
 446			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 447					     : : "r" (pstate));
 448			return;
 449		}
 450		stuck -= 1;
 451		if (stuck == 0)
 452			break;
 453	} while (result & 0x1);
 454	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 455			     : : "r" (pstate));
 456	if (stuck == 0) {
 457		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
 458		       smp_processor_id(), result);
 459	} else {
 460		udelay(2);
 461		goto again;
 462	}
 463}
 464
 465static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 466{
 467	u64 *mondo, data0, data1, data2;
 468	u16 *cpu_list;
 469	u64 pstate;
 470	int i;
 471
 472	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 473	cpu_list = __va(tb->cpu_list_pa);
 474	mondo = __va(tb->cpu_mondo_block_pa);
 475	data0 = mondo[0];
 476	data1 = mondo[1];
 477	data2 = mondo[2];
 478	for (i = 0; i < cnt; i++)
 479		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
 480}
 481
  482/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
  483 * packet, but we have no use for that.  However, we do take advantage of
  484 * the new pipelining feature (i.e. dispatch to multiple cpus simultaneously).
 485 */
 486static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 487{
 488	int nack_busy_id, is_jbus, need_more;
 489	u64 *mondo, pstate, ver, busy_mask;
 490	u16 *cpu_list;
 491
 492	cpu_list = __va(tb->cpu_list_pa);
 493	mondo = __va(tb->cpu_mondo_block_pa);
 494
 495	/* Unfortunately, someone at Sun had the brilliant idea to make the
 496	 * busy/nack fields hard-coded by ITID number for this Ultra-III
 497	 * derivative processor.
 498	 */
 499	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
 500	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
 501		   (ver >> 32) == __SERRANO_ID);
 502
 503	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 504
 505retry:
 506	need_more = 0;
 507	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
 508			     : : "r" (pstate), "i" (PSTATE_IE));
 509
 510	/* Setup the dispatch data registers. */
 511	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
 512			     "stxa	%1, [%4] %6\n\t"
 513			     "stxa	%2, [%5] %6\n\t"
 514			     "membar	#Sync\n\t"
 515			     : /* no outputs */
 516			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
 517			       "r" (0x40), "r" (0x50), "r" (0x60),
 518			       "i" (ASI_INTR_W));
 519
 520	nack_busy_id = 0;
 521	busy_mask = 0;
 522	{
 523		int i;
 524
 525		for (i = 0; i < cnt; i++) {
 526			u64 target, nr;
 527
 528			nr = cpu_list[i];
 529			if (nr == 0xffff)
 530				continue;
 531
 532			target = (nr << 14) | 0x70;
 533			if (is_jbus) {
 534				busy_mask |= (0x1UL << (nr * 2));
 535			} else {
 536				target |= (nack_busy_id << 24);
 537				busy_mask |= (0x1UL <<
 538					      (nack_busy_id * 2));
 539			}
 540			__asm__ __volatile__(
 541				"stxa	%%g0, [%0] %1\n\t"
 542				"membar	#Sync\n\t"
 543				: /* no outputs */
 544				: "r" (target), "i" (ASI_INTR_W));
 545			nack_busy_id++;
 546			if (nack_busy_id == 32) {
 547				need_more = 1;
 548				break;
 549			}
 550		}
 551	}
 552
 553	/* Now, poll for completion. */
 554	{
 555		u64 dispatch_stat, nack_mask;
 556		long stuck;
 557
 558		stuck = 100000 * nack_busy_id;
 559		nack_mask = busy_mask << 1;
 560		do {
 561			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
 562					     : "=r" (dispatch_stat)
 563					     : "i" (ASI_INTR_DISPATCH_STAT));
 564			if (!(dispatch_stat & (busy_mask | nack_mask))) {
 565				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 566						     : : "r" (pstate));
 567				if (unlikely(need_more)) {
 568					int i, this_cnt = 0;
 569					for (i = 0; i < cnt; i++) {
 570						if (cpu_list[i] == 0xffff)
 571							continue;
 572						cpu_list[i] = 0xffff;
 573						this_cnt++;
 574						if (this_cnt == 32)
 575							break;
 576					}
 577					goto retry;
 578				}
 579				return;
 580			}
 581			if (!--stuck)
 582				break;
 583		} while (dispatch_stat & busy_mask);
 584
 585		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 586				     : : "r" (pstate));
 587
 588		if (dispatch_stat & busy_mask) {
 589			/* Busy bits will not clear, continue instead
 590			 * of freezing up on this cpu.
 591			 */
 592			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
 593			       smp_processor_id(), dispatch_stat);
 594		} else {
 595			int i, this_busy_nack = 0;
 596
 597			/* Delay some random time with interrupts enabled
 598			 * to prevent deadlock.
 599			 */
 600			udelay(2 * nack_busy_id);
 601
 602			/* Clear out the mask bits for cpus which did not
 603			 * NACK us.
 604			 */
 605			for (i = 0; i < cnt; i++) {
 606				u64 check_mask, nr;
 607
 608				nr = cpu_list[i];
 609				if (nr == 0xffff)
 610					continue;
 611
 612				if (is_jbus)
 613					check_mask = (0x2UL << (2*nr));
 614				else
 615					check_mask = (0x2UL <<
 616						      this_busy_nack);
 617				if ((dispatch_stat & check_mask) == 0)
 618					cpu_list[i] = 0xffff;
 619				this_busy_nack += 2;
 620				if (this_busy_nack == 64)
 621					break;
 622			}
 623
 624			goto retry;
 625		}
 626	}
 627}
 628
 629#define	CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
 630#define	MONDO_USEC_WAIT_MIN		2
 631#define	MONDO_USEC_WAIT_MAX		100
 632#define	MONDO_RETRY_LIMIT		500000
 633
 634/* Multi-cpu list version.
 635 *
 636 * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
  637 * Sometimes not all cpus receive the mondo, requiring us to re-send
  638 * it until every cpu has received it, or the remaining cpus are truly
  639 * stuck, unable to receive the mondo, and we time out.
  640 * Occasionally a target cpu strand is borrowed briefly by the hypervisor
  641 * to perform guest service, such as PCIe error handling. Considering the
  642 * service time, an overall wait of 1 second is reasonable for 1 cpu.
  643 * Two in-between mondo check wait times are defined: 2 usec for a
  644 * single-cpu quick turnaround and up to 100 usec for a large cpu count.
  645 * Delivering a mondo to a large number of cpus can take longer, so we
  646 * adjust the retry count as long as target cpus keep making forward progress.
 647 */
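/* Editor's note: illustrative arithmetic for the constants above (not part
 * of the original source).  For a single target cpu, usec_wait below is
 * 1 * MONDO_USEC_WAIT_MIN = 2 usec per no-progress retry, and with
 * MONDO_RETRY_LIMIT = 500000 retries that is the ~1 second overall wait
 * mentioned above; with the 100 usec cap for large cpu counts the worst
 * case no-progress budget grows to roughly 50 seconds before the
 * fatal_mondo_timeout path panics.
 */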
 648static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 649{
 650	int this_cpu, tot_cpus, prev_sent, i, rem;
 651	int usec_wait, retries, tot_retries;
 652	u16 first_cpu = 0xffff;
 653	unsigned long xc_rcvd = 0;
 654	unsigned long status;
 655	int ecpuerror_id = 0;
 656	int enocpu_id = 0;
 657	u16 *cpu_list;
 658	u16 cpu;
 659
 660	this_cpu = smp_processor_id();
 661	cpu_list = __va(tb->cpu_list_pa);
 662	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
 663	if (usec_wait > MONDO_USEC_WAIT_MAX)
 664		usec_wait = MONDO_USEC_WAIT_MAX;
 665	retries = tot_retries = 0;
 666	tot_cpus = cnt;
 667	prev_sent = 0;
 668
 669	do {
 670		int n_sent, mondo_delivered, target_cpu_busy;
 671
 672		status = sun4v_cpu_mondo_send(cnt,
 673					      tb->cpu_list_pa,
 674					      tb->cpu_mondo_block_pa);
 675
 676		/* HV_EOK means all cpus received the xcall, we're done.  */
 677		if (likely(status == HV_EOK))
 678			goto xcall_done;
 679
  680		/* If the status is not one of these non-fatal errors, panic */
 681		if (unlikely((status != HV_EWOULDBLOCK) &&
 682			(status != HV_ECPUERROR) &&
 683			(status != HV_ENOCPU)))
 684			goto fatal_errors;
 685
 686		/* First, see if we made any forward progress.
 687		 *
 688		 * Go through the cpu_list, count the target cpus that have
 689		 * received our mondo (n_sent), and those that did not (rem).
  690		 * Re-pack cpu_list with the cpus that remain to be retried at the
  691		 * front - this simplifies tracking the truly stalled cpus.
 692		 *
 693		 * The hypervisor indicates successful sends by setting
 694		 * cpu list entries to the value 0xffff.
 695		 *
 696		 * EWOULDBLOCK means some target cpus did not receive the
 697		 * mondo and retry usually helps.
 698		 *
 699		 * ECPUERROR means at least one target cpu is in error state,
 700		 * it's usually safe to skip the faulty cpu and retry.
 701		 *
  702		 * ENOCPU means one of the target cpus doesn't belong to the
  703		 * domain; it was perhaps offlined, which is unexpected, but not
  704		 * fatal, and it's okay to skip the offlined cpu.
 705		 */
 706		rem = 0;
 707		n_sent = 0;
 708		for (i = 0; i < cnt; i++) {
 709			cpu = cpu_list[i];
 710			if (likely(cpu == 0xffff)) {
 711				n_sent++;
 712			} else if ((status == HV_ECPUERROR) &&
 713				(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
 714				ecpuerror_id = cpu + 1;
 715			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
 716				enocpu_id = cpu + 1;
 717			} else {
 718				cpu_list[rem++] = cpu;
 719			}
 720		}
 721
 722		/* No cpu remained, we're done. */
 723		if (rem == 0)
 724			break;
 725
 726		/* Otherwise, update the cpu count for retry. */
 727		cnt = rem;
 728
 729		/* Record the overall number of mondos received by the
 730		 * first of the remaining cpus.
 731		 */
 732		if (first_cpu != cpu_list[0]) {
 733			first_cpu = cpu_list[0];
 734			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
 735		}
 736
 737		/* Was any mondo delivered successfully? */
 738		mondo_delivered = (n_sent > prev_sent);
 739		prev_sent = n_sent;
 740
 741		/* or, was any target cpu busy processing other mondos? */
 742		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
 743		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
 744
 745		/* Retry count is for no progress. If we're making progress,
 746		 * reset the retry count.
 747		 */
 748		if (likely(mondo_delivered || target_cpu_busy)) {
 749			tot_retries += retries;
 750			retries = 0;
 751		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
 752			goto fatal_mondo_timeout;
 753		}
 754
 755		/* Delay a little bit to let other cpus catch up on
 756		 * their cpu mondo queue work.
 757		 */
 758		if (!mondo_delivered)
 759			udelay(usec_wait);
 760
 761		retries++;
 762	} while (1);
 763
 764xcall_done:
 765	if (unlikely(ecpuerror_id > 0)) {
 766		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
 767		       this_cpu, ecpuerror_id - 1);
 768	} else if (unlikely(enocpu_id > 0)) {
 769		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
 770		       this_cpu, enocpu_id - 1);
 771	}
 772	return;
 773
 774fatal_errors:
 775	/* fatal errors include bad alignment, etc */
 776	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
 777	       this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
 778	panic("Unexpected SUN4V mondo error %lu\n", status);
 779
 780fatal_mondo_timeout:
 781	/* some cpus being non-responsive to the cpu mondo */
 782	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
 783	       this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
 784	panic("SUN4V mondo timeout panic\n");
 785}
 786
 787static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
 788
 789static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 790{
 791	struct trap_per_cpu *tb;
 792	int this_cpu, i, cnt;
 793	unsigned long flags;
 794	u16 *cpu_list;
 795	u64 *mondo;
 796
 797	/* We have to do this whole thing with interrupts fully disabled.
 798	 * Otherwise if we send an xcall from interrupt context it will
 799	 * corrupt both our mondo block and cpu list state.
 800	 *
 801	 * One consequence of this is that we cannot use timeout mechanisms
 802	 * that depend upon interrupts being delivered locally.  So, for
 803	 * example, we cannot sample jiffies and expect it to advance.
 804	 *
 805	 * Fortunately, udelay() uses %stick/%tick so we can use that.
 806	 */
 807	local_irq_save(flags);
 808
 809	this_cpu = smp_processor_id();
 810	tb = &trap_block[this_cpu];
 811
 812	mondo = __va(tb->cpu_mondo_block_pa);
 813	mondo[0] = data0;
 814	mondo[1] = data1;
 815	mondo[2] = data2;
 816	wmb();
 817
 818	cpu_list = __va(tb->cpu_list_pa);
 819
 820	/* Setup the initial cpu list.  */
 821	cnt = 0;
 822	for_each_cpu(i, mask) {
 823		if (i == this_cpu || !cpu_online(i))
 824			continue;
 825		cpu_list[cnt++] = i;
 826	}
 827
 828	if (cnt)
 829		xcall_deliver_impl(tb, cnt);
 830
 831	local_irq_restore(flags);
 832}
 833
 834/* Send cross call to all processors mentioned in MASK_P
 835 * except self.  Really, there are only two cases currently,
 836 * "cpu_online_mask" and "mm_cpumask(mm)".
 837 */
 838static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
 839{
 840	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
 841
 842	xcall_deliver(data0, data1, data2, mask);
 843}
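/* Editor's note: illustrative encoding of data0 above (example values, not
 * from the original source).  The MMU context occupies the upper 32 bits
 * and the low 32 bits of the xcall handler's kernel address fill the lower
 * half, e.g. ctx = 0x2a and a handler at 0x404040 give
 * data0 = 0x0000002a00404040; the receiving trap code is assumed to recover
 * the handler PC from those low 32 bits.
 */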
 844
 845/* Send cross call to all processors except self. */
 846static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
 847{
 848	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
 849}
 850
 851extern unsigned long xcall_sync_tick;
 852
 853static void smp_start_sync_tick_client(int cpu)
 854{
 855	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
 856		      cpumask_of(cpu));
 857}
 858
 859extern unsigned long xcall_call_function;
 860
 861void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 862{
 863	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
 864}
 865
 866extern unsigned long xcall_call_function_single;
 867
 868void arch_send_call_function_single_ipi(int cpu)
 869{
 870	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
 871		      cpumask_of(cpu));
 872}
 873
 874void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 875{
 876	clear_softint(1 << irq);
 877	irq_enter();
 878	generic_smp_call_function_interrupt();
 879	irq_exit();
 880}
 881
 882void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 883{
 884	clear_softint(1 << irq);
 885	irq_enter();
 886	generic_smp_call_function_single_interrupt();
 887	irq_exit();
 888}
 889
 890static void tsb_sync(void *info)
 891{
 892	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
 893	struct mm_struct *mm = info;
 894
 895	/* It is not valid to test "current->active_mm == mm" here.
 896	 *
 897	 * The value of "current" is not changed atomically with
 898	 * switch_mm().  But that's OK, we just need to check the
 899	 * current cpu's trap block PGD physical address.
 900	 */
 901	if (tp->pgd_paddr == __pa(mm->pgd))
 902		tsb_context_switch(mm);
 903}
 904
 905void smp_tsb_sync(struct mm_struct *mm)
 906{
 907	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
 908}
 909
 910extern unsigned long xcall_flush_tlb_mm;
 911extern unsigned long xcall_flush_tlb_page;
 912extern unsigned long xcall_flush_tlb_kernel_range;
 913extern unsigned long xcall_fetch_glob_regs;
 914extern unsigned long xcall_fetch_glob_pmu;
 915extern unsigned long xcall_fetch_glob_pmu_n4;
 916extern unsigned long xcall_receive_signal;
 917extern unsigned long xcall_new_mmu_context_version;
 918#ifdef CONFIG_KGDB
 919extern unsigned long xcall_kgdb_capture;
 920#endif
 921
 922#ifdef DCACHE_ALIASING_POSSIBLE
 923extern unsigned long xcall_flush_dcache_page_cheetah;
 924#endif
 925extern unsigned long xcall_flush_dcache_page_spitfire;
 926
 927static inline void __local_flush_dcache_page(struct page *page)
 928{
 929#ifdef DCACHE_ALIASING_POSSIBLE
 930	__flush_dcache_page(page_address(page),
 931			    ((tlb_type == spitfire) &&
 932			     page_mapping_file(page) != NULL));
 933#else
 934	if (page_mapping_file(page) != NULL &&
 935	    tlb_type == spitfire)
 936		__flush_icache_page(__pa(page_address(page)));
 937#endif
 938}
 939
 940void smp_flush_dcache_page_impl(struct page *page, int cpu)
 941{
 942	int this_cpu;
 943
 944	if (tlb_type == hypervisor)
 945		return;
 946
 947#ifdef CONFIG_DEBUG_DCFLUSH
 948	atomic_inc(&dcpage_flushes);
 949#endif
 950
 951	this_cpu = get_cpu();
 952
 953	if (cpu == this_cpu) {
 954		__local_flush_dcache_page(page);
 955	} else if (cpu_online(cpu)) {
 956		void *pg_addr = page_address(page);
 957		u64 data0 = 0;
 958
 959		if (tlb_type == spitfire) {
 960			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
 961			if (page_mapping_file(page) != NULL)
 962				data0 |= ((u64)1 << 32);
 963		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 964#ifdef DCACHE_ALIASING_POSSIBLE
 965			data0 =	((u64)&xcall_flush_dcache_page_cheetah);
 966#endif
 967		}
 968		if (data0) {
 969			xcall_deliver(data0, __pa(pg_addr),
 970				      (u64) pg_addr, cpumask_of(cpu));
 971#ifdef CONFIG_DEBUG_DCFLUSH
 972			atomic_inc(&dcpage_flushes_xcall);
 973#endif
 974		}
 975	}
 976
 977	put_cpu();
 978}
 979
 980void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 981{
 982	void *pg_addr;
 983	u64 data0;
 984
 985	if (tlb_type == hypervisor)
 986		return;
 987
 988	preempt_disable();
 989
 990#ifdef CONFIG_DEBUG_DCFLUSH
 991	atomic_inc(&dcpage_flushes);
 992#endif
 993	data0 = 0;
 994	pg_addr = page_address(page);
 995	if (tlb_type == spitfire) {
 996		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
 997		if (page_mapping_file(page) != NULL)
 998			data0 |= ((u64)1 << 32);
 999	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1000#ifdef DCACHE_ALIASING_POSSIBLE
1001		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
1002#endif
1003	}
1004	if (data0) {
1005		xcall_deliver(data0, __pa(pg_addr),
1006			      (u64) pg_addr, cpu_online_mask);
1007#ifdef CONFIG_DEBUG_DCFLUSH
1008		atomic_inc(&dcpage_flushes_xcall);
1009#endif
1010	}
1011	__local_flush_dcache_page(page);
1012
1013	preempt_enable();
1014}
1015
1016#ifdef CONFIG_KGDB
1017void kgdb_roundup_cpus(unsigned long flags)
1018{
1019	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
1020}
1021#endif
1022
1023void smp_fetch_global_regs(void)
1024{
1025	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
1026}
1027
1028void smp_fetch_global_pmu(void)
1029{
1030	if (tlb_type == hypervisor &&
1031	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
1032		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
1033	else
1034		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
1035}
1036
1037/* We know that the window frames of the user have been flushed
1038 * to the stack before we get here because all callers of us
1039 * are flush_tlb_*() routines, and these run after flush_cache_*()
1040 * which performs the flushw.
1041 *
1042 * The SMP TLB coherency scheme we use works as follows:
1043 *
1044 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1045 *    space has (potentially) executed on, this is the heuristic
1046 *    we use to avoid doing cross calls.
1047 *
1048 *    Also, for flushing from kswapd and also for clones, we
 1049 *    use cpu_vm_mask as the list of cpus on which to run the TLB flush.
1050 *
1051 * 2) TLB context numbers are shared globally across all processors
1052 *    in the system, this allows us to play several games to avoid
1053 *    cross calls.
1054 *
1055 *    One invariant is that when a cpu switches to a process, and
 1056 *    that process's tsk->active_mm->cpu_vm_mask does not have the
1057 *    current cpu's bit set, that tlb context is flushed locally.
1058 *
1059 *    If the address space is non-shared (ie. mm->count == 1) we avoid
1060 *    cross calls when we want to flush the currently running process's
1061 *    tlb state.  This is done by clearing all cpu bits except the current
1062 *    processor's in current->mm->cpu_vm_mask and performing the
1063 *    flush locally only.  This will force any subsequent cpus which run
1064 *    this task to flush the context from the local tlb if the process
1065 *    migrates to another cpu (again).
1066 *
1067 * 3) For shared address spaces (threads) and swapping we bite the
1068 *    bullet for most cases and perform the cross call (but only to
1069 *    the cpus listed in cpu_vm_mask).
1070 *
1071 *    The performance gain from "optimizing" away the cross call for threads is
1072 *    questionable (in theory the big win for threads is the massive sharing of
1073 *    address space state across processors).
1074 */
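/* Editor's note: an illustrative scenario for case 2 above (not part of
 * the original source).  A process whose mm has a single user, last run on
 * cpu 2 and currently flushing from cpu 5, takes the local path in
 * smp_flush_tlb_pending() below: mm_cpumask(mm) is shrunk to just cpu 5 and
 * only the local TLB is flushed.  If the task later migrates back to cpu 2,
 * that cpu's bit is no longer set, so the switch_mm() invariant described
 * above makes cpu 2 flush the stale context locally rather than requiring a
 * cross call now.
 */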
1075
1076/* This currently is only used by the hugetlb arch pre-fault
1077 * hook on UltraSPARC-III+ and later when changing the pagesize
1078 * bits of the context register for an address space.
1079 */
1080void smp_flush_tlb_mm(struct mm_struct *mm)
1081{
1082	u32 ctx = CTX_HWBITS(mm->context);
1083	int cpu = get_cpu();
1084
1085	if (atomic_read(&mm->mm_users) == 1) {
1086		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1087		goto local_flush_and_out;
1088	}
1089
1090	smp_cross_call_masked(&xcall_flush_tlb_mm,
1091			      ctx, 0, 0,
1092			      mm_cpumask(mm));
1093
1094local_flush_and_out:
1095	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1096
1097	put_cpu();
1098}
1099
1100struct tlb_pending_info {
1101	unsigned long ctx;
1102	unsigned long nr;
1103	unsigned long *vaddrs;
1104};
1105
1106static void tlb_pending_func(void *info)
1107{
1108	struct tlb_pending_info *t = info;
1109
1110	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
1111}
1112
1113void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1114{
1115	u32 ctx = CTX_HWBITS(mm->context);
1116	struct tlb_pending_info info;
1117	int cpu = get_cpu();
1118
1119	info.ctx = ctx;
1120	info.nr = nr;
1121	info.vaddrs = vaddrs;
1122
1123	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1124		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1125	else
1126		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
1127				       &info, 1);
1128
1129	__flush_tlb_pending(ctx, nr, vaddrs);
1130
1131	put_cpu();
1132}
1133
1134void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
1135{
1136	unsigned long context = CTX_HWBITS(mm->context);
1137	int cpu = get_cpu();
1138
1139	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1140		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1141	else
1142		smp_cross_call_masked(&xcall_flush_tlb_page,
1143				      context, vaddr, 0,
1144				      mm_cpumask(mm));
1145	__flush_tlb_page(context, vaddr);
1146
1147	put_cpu();
1148}
1149
1150void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1151{
1152	start &= PAGE_MASK;
1153	end    = PAGE_ALIGN(end);
1154	if (start != end) {
1155		smp_cross_call(&xcall_flush_tlb_kernel_range,
1156			       0, start, end);
1157
1158		__flush_tlb_kernel_range(start, end);
1159	}
1160}
1161
1162/* CPU capture. */
1163/* #define CAPTURE_DEBUG */
1164extern unsigned long xcall_capture;
1165
1166static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1167static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1168static unsigned long penguins_are_doing_time;
1169
1170void smp_capture(void)
1171{
1172	int result = atomic_add_return(1, &smp_capture_depth);
1173
1174	if (result == 1) {
1175		int ncpus = num_online_cpus();
1176
1177#ifdef CAPTURE_DEBUG
1178		printk("CPU[%d]: Sending penguins to jail...",
1179		       smp_processor_id());
1180#endif
1181		penguins_are_doing_time = 1;
1182		atomic_inc(&smp_capture_registry);
1183		smp_cross_call(&xcall_capture, 0, 0, 0);
1184		while (atomic_read(&smp_capture_registry) != ncpus)
1185			rmb();
1186#ifdef CAPTURE_DEBUG
1187		printk("done\n");
1188#endif
1189	}
1190}
1191
1192void smp_release(void)
1193{
1194	if (atomic_dec_and_test(&smp_capture_depth)) {
1195#ifdef CAPTURE_DEBUG
1196		printk("CPU[%d]: Giving pardon to "
1197		       "imprisoned penguins\n",
1198		       smp_processor_id());
1199#endif
1200		penguins_are_doing_time = 0;
1201		membar_safe("#StoreLoad");
1202		atomic_dec(&smp_capture_registry);
1203	}
1204}
1205
1206/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
1207 * set, so they can service tlb flush xcalls...
1208 */
1209extern void prom_world(int);
1210
1211void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
1212{
1213	clear_softint(1 << irq);
1214
1215	preempt_disable();
1216
1217	__asm__ __volatile__("flushw");
1218	prom_world(1);
1219	atomic_inc(&smp_capture_registry);
1220	membar_safe("#StoreLoad");
1221	while (penguins_are_doing_time)
1222		rmb();
1223	atomic_dec(&smp_capture_registry);
1224	prom_world(0);
1225
1226	preempt_enable();
1227}
1228
1229/* /proc/profile writes can call this, don't __init it please. */
1230int setup_profiling_timer(unsigned int multiplier)
1231{
1232	return -EINVAL;
1233}
1234
1235void __init smp_prepare_cpus(unsigned int max_cpus)
1236{
1237}
1238
1239void smp_prepare_boot_cpu(void)
1240{
1241}
1242
1243void __init smp_setup_processor_id(void)
1244{
1245	if (tlb_type == spitfire)
1246		xcall_deliver_impl = spitfire_xcall_deliver;
1247	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
1248		xcall_deliver_impl = cheetah_xcall_deliver;
1249	else
1250		xcall_deliver_impl = hypervisor_xcall_deliver;
1251}
1252
1253void __init smp_fill_in_cpu_possible_map(void)
1254{
1255	int possible_cpus = num_possible_cpus();
1256	int i;
1257
1258	if (possible_cpus > nr_cpu_ids)
1259		possible_cpus = nr_cpu_ids;
1260
1261	for (i = 0; i < possible_cpus; i++)
1262		set_cpu_possible(i, true);
1263	for (; i < NR_CPUS; i++)
1264		set_cpu_possible(i, false);
1265}
1266
1267void smp_fill_in_sib_core_maps(void)
1268{
1269	unsigned int i;
1270
1271	for_each_present_cpu(i) {
1272		unsigned int j;
1273
1274		cpumask_clear(&cpu_core_map[i]);
1275		if (cpu_data(i).core_id == 0) {
1276			cpumask_set_cpu(i, &cpu_core_map[i]);
1277			continue;
1278		}
1279
1280		for_each_present_cpu(j) {
1281			if (cpu_data(i).core_id ==
1282			    cpu_data(j).core_id)
1283				cpumask_set_cpu(j, &cpu_core_map[i]);
1284		}
1285	}
1286
1287	for_each_present_cpu(i)  {
1288		unsigned int j;
1289
1290		for_each_present_cpu(j)  {
1291			if (cpu_data(i).max_cache_id ==
1292			    cpu_data(j).max_cache_id)
1293				cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);
1294
1295			if (cpu_data(i).sock_id == cpu_data(j).sock_id)
1296				cpumask_set_cpu(j, &cpu_core_sib_map[i]);
1297		}
1298	}
1299
1300	for_each_present_cpu(i) {
1301		unsigned int j;
1302
1303		cpumask_clear(&per_cpu(cpu_sibling_map, i));
1304		if (cpu_data(i).proc_id == -1) {
1305			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
1306			continue;
1307		}
1308
1309		for_each_present_cpu(j) {
1310			if (cpu_data(i).proc_id ==
1311			    cpu_data(j).proc_id)
1312				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
1313		}
1314	}
1315}
1316
1317int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1318{
1319	int ret = smp_boot_one_cpu(cpu, tidle);
1320
1321	if (!ret) {
1322		cpumask_set_cpu(cpu, &smp_commenced_mask);
1323		while (!cpu_online(cpu))
1324			mb();
1325		if (!cpu_online(cpu)) {
1326			ret = -ENODEV;
1327		} else {
1328			/* On SUN4V, writes to %tick and %stick are
1329			 * not allowed.
1330			 */
1331			if (tlb_type != hypervisor)
1332				smp_synchronize_one_tick(cpu);
1333		}
1334	}
1335	return ret;
1336}
1337
1338#ifdef CONFIG_HOTPLUG_CPU
1339void cpu_play_dead(void)
1340{
1341	int cpu = smp_processor_id();
1342	unsigned long pstate;
1343
1344	idle_task_exit();
1345
1346	if (tlb_type == hypervisor) {
1347		struct trap_per_cpu *tb = &trap_block[cpu];
1348
1349		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1350				tb->cpu_mondo_pa, 0);
1351		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1352				tb->dev_mondo_pa, 0);
1353		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1354				tb->resum_mondo_pa, 0);
1355		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1356				tb->nonresum_mondo_pa, 0);
1357	}
1358
1359	cpumask_clear_cpu(cpu, &smp_commenced_mask);
1360	membar_safe("#Sync");
1361
1362	local_irq_disable();
1363
1364	__asm__ __volatile__(
1365		"rdpr	%%pstate, %0\n\t"
1366		"wrpr	%0, %1, %%pstate"
1367		: "=r" (pstate)
1368		: "i" (PSTATE_IE));
1369
1370	while (1)
1371		barrier();
1372}
1373
1374int __cpu_disable(void)
1375{
1376	int cpu = smp_processor_id();
1377	cpuinfo_sparc *c;
1378	int i;
1379
1380	for_each_cpu(i, &cpu_core_map[cpu])
1381		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
1382	cpumask_clear(&cpu_core_map[cpu]);
1383
1384	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
1385		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
1386	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
1387
1388	c = &cpu_data(cpu);
1389
1390	c->core_id = 0;
1391	c->proc_id = -1;
1392
1393	smp_wmb();
1394
1395	/* Make sure no interrupts point to this cpu.  */
1396	fixup_irqs();
1397
1398	local_irq_enable();
1399	mdelay(1);
1400	local_irq_disable();
1401
1402	set_cpu_online(cpu, false);
1403
1404	cpu_map_rebuild();
1405
1406	return 0;
1407}
1408
1409void __cpu_die(unsigned int cpu)
1410{
1411	int i;
1412
1413	for (i = 0; i < 100; i++) {
1414		smp_rmb();
1415		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
1416			break;
1417		msleep(100);
1418	}
1419	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
1420		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1421	} else {
1422#if defined(CONFIG_SUN_LDOMS)
1423		unsigned long hv_err;
1424		int limit = 100;
1425
1426		do {
1427			hv_err = sun4v_cpu_stop(cpu);
1428			if (hv_err == HV_EOK) {
1429				set_cpu_present(cpu, false);
1430				break;
1431			}
1432		} while (--limit > 0);
1433		if (limit <= 0) {
1434			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
1435			       hv_err);
1436		}
1437#endif
1438	}
1439}
1440#endif
1441
1442void __init smp_cpus_done(unsigned int max_cpus)
1443{
1444}
1445
1446static void send_cpu_ipi(int cpu)
1447{
1448	xcall_deliver((u64) &xcall_receive_signal,
1449			0, 0, cpumask_of(cpu));
1450}
1451
1452void scheduler_poke(void)
1453{
1454	if (!cpu_poke)
1455		return;
1456
1457	if (!__this_cpu_read(poke))
1458		return;
1459
1460	__this_cpu_write(poke, false);
1461	set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
1462}
1463
1464static unsigned long send_cpu_poke(int cpu)
1465{
1466	unsigned long hv_err;
1467
1468	per_cpu(poke, cpu) = true;
1469	hv_err = sun4v_cpu_poke(cpu);
1470	if (hv_err != HV_EOK) {
1471		per_cpu(poke, cpu) = false;
1472		pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
1473				    __func__, hv_err);
1474	}
1475
1476	return hv_err;
1477}
1478
1479void smp_send_reschedule(int cpu)
1480{
1481	if (cpu == smp_processor_id()) {
1482		WARN_ON_ONCE(preemptible());
1483		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
1484		return;
1485	}
1486
1487	/* Use cpu poke to resume idle cpu if supported. */
1488	if (cpu_poke && idle_cpu(cpu)) {
1489		unsigned long ret;
1490
1491		ret = send_cpu_poke(cpu);
1492		if (ret == HV_EOK)
1493			return;
1494	}
1495
1496	/* Use IPI in following cases:
1497	 * - cpu poke not supported
1498	 * - cpu not idle
1499	 * - send_cpu_poke() returns with error
1500	 */
1501	send_cpu_ipi(cpu);
1502}
1503
1504void smp_init_cpu_poke(void)
1505{
1506	unsigned long major;
1507	unsigned long minor;
1508	int ret;
1509
1510	if (tlb_type != hypervisor)
1511		return;
1512
1513	ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
1514	if (ret) {
1515		pr_debug("HV_GRP_CORE is not registered\n");
1516		return;
1517	}
1518
1519	if (major == 1 && minor >= 6) {
1520		/* CPU POKE is registered. */
1521		cpu_poke = true;
1522		return;
1523	}
1524
1525	pr_debug("CPU_POKE not supported\n");
1526}
1527
1528void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1529{
1530	clear_softint(1 << irq);
1531	scheduler_ipi();
1532}
1533
1534static void stop_this_cpu(void *dummy)
1535{
1536	set_cpu_online(smp_processor_id(), false);
1537	prom_stopself();
1538}
1539
1540void smp_send_stop(void)
1541{
1542	int cpu;
1543
1544	if (tlb_type == hypervisor) {
1545		int this_cpu = smp_processor_id();
1546#ifdef CONFIG_SERIAL_SUNHV
1547		sunhv_migrate_hvcons_irq(this_cpu);
1548#endif
1549		for_each_online_cpu(cpu) {
1550			if (cpu == this_cpu)
1551				continue;
1552
1553			set_cpu_online(cpu, false);
1554#ifdef CONFIG_SUN_LDOMS
1555			if (ldom_domaining_enabled) {
1556				unsigned long hv_err;
1557				hv_err = sun4v_cpu_stop(cpu);
1558				if (hv_err)
1559					printk(KERN_ERR "sun4v_cpu_stop() "
1560					       "failed err=%lu\n", hv_err);
1561			} else
1562#endif
1563				prom_stopcpu_cpuid(cpu);
1564		}
1565	} else
1566		smp_call_function(stop_this_cpu, NULL, 0);
1567}
1568
1569/**
1570 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
1571 * @cpu: cpu to allocate for
1572 * @size: size allocation in bytes
1573 * @align: alignment
1574 *
1575 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
1576 * does the right thing for NUMA regardless of the current
1577 * configuration.
1578 *
1579 * RETURNS:
1580 * Pointer to the allocated area on success, NULL on failure.
1581 */
1582static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
1583					size_t align)
1584{
1585	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
1586#ifdef CONFIG_NEED_MULTIPLE_NODES
1587	int node = cpu_to_node(cpu);
1588	void *ptr;
1589
1590	if (!node_online(node) || !NODE_DATA(node)) {
1591		ptr = __alloc_bootmem(size, align, goal);
1592		pr_info("cpu %d has no node %d or node-local memory\n",
1593			cpu, node);
1594		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
1595			 cpu, size, __pa(ptr));
1596	} else {
1597		ptr = __alloc_bootmem_node(NODE_DATA(node),
1598					   size, align, goal);
1599		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
1600			 "%016lx\n", cpu, size, node, __pa(ptr));
1601	}
1602	return ptr;
1603#else
1604	return __alloc_bootmem(size, align, goal);
1605#endif
1606}
1607
1608static void __init pcpu_free_bootmem(void *ptr, size_t size)
1609{
1610	free_bootmem(__pa(ptr), size);
1611}
1612
1613static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
1614{
1615	if (cpu_to_node(from) == cpu_to_node(to))
1616		return LOCAL_DISTANCE;
1617	else
1618		return REMOTE_DISTANCE;
1619}
1620
1621static void __init pcpu_populate_pte(unsigned long addr)
1622{
1623	pgd_t *pgd = pgd_offset_k(addr);
1624	pud_t *pud;
1625	pmd_t *pmd;
1626
1627	if (pgd_none(*pgd)) {
1628		pud_t *new;
1629
1630		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1631		pgd_populate(&init_mm, pgd, new);
1632	}
1633
1634	pud = pud_offset(pgd, addr);
1635	if (pud_none(*pud)) {
1636		pmd_t *new;
1637
1638		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1639		pud_populate(&init_mm, pud, new);
1640	}
1641
1642	pmd = pmd_offset(pud, addr);
1643	if (!pmd_present(*pmd)) {
1644		pte_t *new;
1645
1646		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1647		pmd_populate_kernel(&init_mm, pmd, new);
1648	}
1649}
1650
1651void __init setup_per_cpu_areas(void)
1652{
1653	unsigned long delta;
1654	unsigned int cpu;
1655	int rc = -EINVAL;
1656
1657	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
1658		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1659					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
1660					    pcpu_cpu_distance,
1661					    pcpu_alloc_bootmem,
1662					    pcpu_free_bootmem);
1663		if (rc)
1664			pr_warning("PERCPU: %s allocator failed (%d), "
1665				   "falling back to page size\n",
1666				   pcpu_fc_names[pcpu_chosen_fc], rc);
1667	}
1668	if (rc < 0)
1669		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
1670					   pcpu_alloc_bootmem,
1671					   pcpu_free_bootmem,
1672					   pcpu_populate_pte);
1673	if (rc < 0)
1674		panic("cannot initialize percpu area (err=%d)", rc);
1675
1676	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1677	for_each_possible_cpu(cpu)
1678		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
1679
1680	/* Setup %g5 for the boot cpu.  */
1681	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1682
1683	of_fill_in_cpu_data();
1684	if (tlb_type == hypervisor)
1685		mdesc_fill_in_cpu_data(cpu_all_mask);
1686}
v3.1
 
   1/* smp.c: Sparc64 SMP support.
   2 *
   3 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
   4 */
   5
   6#include <linux/module.h>
   7#include <linux/kernel.h>
   8#include <linux/sched.h>
 
   9#include <linux/mm.h>
  10#include <linux/pagemap.h>
  11#include <linux/threads.h>
  12#include <linux/smp.h>
  13#include <linux/interrupt.h>
  14#include <linux/kernel_stat.h>
  15#include <linux/delay.h>
  16#include <linux/init.h>
  17#include <linux/spinlock.h>
  18#include <linux/fs.h>
  19#include <linux/seq_file.h>
  20#include <linux/cache.h>
  21#include <linux/jiffies.h>
  22#include <linux/profile.h>
  23#include <linux/bootmem.h>
  24#include <linux/vmalloc.h>
  25#include <linux/ftrace.h>
  26#include <linux/cpu.h>
  27#include <linux/slab.h>
 
  28
  29#include <asm/head.h>
  30#include <asm/ptrace.h>
  31#include <linux/atomic.h>
  32#include <asm/tlbflush.h>
  33#include <asm/mmu_context.h>
  34#include <asm/cpudata.h>
  35#include <asm/hvtramp.h>
  36#include <asm/io.h>
  37#include <asm/timer.h>
 
  38
  39#include <asm/irq.h>
  40#include <asm/irq_regs.h>
  41#include <asm/page.h>
  42#include <asm/pgtable.h>
  43#include <asm/oplib.h>
  44#include <asm/uaccess.h>
  45#include <asm/starfire.h>
  46#include <asm/tlb.h>
  47#include <asm/sections.h>
  48#include <asm/prom.h>
  49#include <asm/mdesc.h>
  50#include <asm/ldc.h>
  51#include <asm/hypervisor.h>
  52#include <asm/pcr.h>
  53
  54#include "cpumap.h"
  55
  56int sparc64_multi_core __read_mostly;
  57
  58DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
  59cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
  60	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
  61
 
 
 
 
 
 
  62EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  63EXPORT_SYMBOL(cpu_core_map);
 
 
  64
  65static cpumask_t smp_commenced_mask;
  66
 
 
 
  67void smp_info(struct seq_file *m)
  68{
  69	int i;
  70	
  71	seq_printf(m, "State:\n");
  72	for_each_online_cpu(i)
  73		seq_printf(m, "CPU%d:\t\tonline\n", i);
  74}
  75
  76void smp_bogo(struct seq_file *m)
  77{
  78	int i;
  79	
  80	for_each_online_cpu(i)
  81		seq_printf(m,
  82			   "Cpu%dClkTck\t: %016lx\n",
  83			   i, cpu_data(i).clock_tick);
  84}
  85
  86extern void setup_sparc64_timer(void);
  87
  88static volatile unsigned long callin_flag = 0;
  89
  90void __cpuinit smp_callin(void)
  91{
  92	int cpuid = hard_smp_processor_id();
  93
  94	__local_per_cpu_offset = __per_cpu_offset(cpuid);
  95
  96	if (tlb_type == hypervisor)
  97		sun4v_ktsb_register();
  98
  99	__flush_tlb_all();
 100
 101	setup_sparc64_timer();
 102
 103	if (cheetah_pcache_forced_on)
 104		cheetah_enable_pcache();
 105
 106	local_irq_enable();
 107
 108	callin_flag = 1;
 109	__asm__ __volatile__("membar #Sync\n\t"
 110			     "flush  %%g6" : : : "memory");
 111
 112	/* Clear this or we will die instantly when we
 113	 * schedule back to this idler...
 114	 */
 115	current_thread_info()->new_child = 0;
 116
 117	/* Attach to the address space of init_task. */
 118	atomic_inc(&init_mm.mm_count);
 119	current->active_mm = &init_mm;
 120
 121	/* inform the notifiers about the new cpu */
 122	notify_cpu_starting(cpuid);
 123
 124	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
 125		rmb();
 126
 127	ipi_call_lock_irq();
 128	set_cpu_online(cpuid, true);
 129	ipi_call_unlock_irq();
 130
 131	/* idle thread is expected to have preempt disabled */
 132	preempt_disable();
 
 
 
 
 133}
 134
 135void cpu_panic(void)
 136{
 137	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
 138	panic("SMP bolixed\n");
 139}
 140
 141/* This tick register synchronization scheme is taken entirely from
 142 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 143 *
 144 * The only change I've made is to rework it so that the master
 145 * initiates the synchonization instead of the slave. -DaveM
 146 */
 147
 148#define MASTER	0
 149#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))
 150
 151#define NUM_ROUNDS	64	/* magic value */
 152#define NUM_ITERS	5	/* likewise */
 153
 154static DEFINE_SPINLOCK(itc_sync_lock);
 155static unsigned long go[SLAVE + 1];
 156
 157#define DEBUG_TICK_SYNC	0
 158
 159static inline long get_delta (long *rt, long *master)
 160{
 161	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
 162	unsigned long tcenter, t0, t1, tm;
 163	unsigned long i;
 164
 165	for (i = 0; i < NUM_ITERS; i++) {
 166		t0 = tick_ops->get_tick();
 167		go[MASTER] = 1;
 168		membar_safe("#StoreLoad");
 169		while (!(tm = go[SLAVE]))
 170			rmb();
 171		go[SLAVE] = 0;
 172		wmb();
 173		t1 = tick_ops->get_tick();
 174
 175		if (t1 - t0 < best_t1 - best_t0)
 176			best_t0 = t0, best_t1 = t1, best_tm = tm;
 177	}
 178
 179	*rt = best_t1 - best_t0;
 180	*master = best_tm - best_t0;
 181
 182	/* average best_t0 and best_t1 without overflow: */
 183	tcenter = (best_t0/2 + best_t1/2);
 184	if (best_t0 % 2 + best_t1 % 2 == 2)
 185		tcenter++;
 186	return tcenter - best_tm;
 187}
 188
 189void smp_synchronize_tick_client(void)
 190{
 191	long i, delta, adj, adjust_latency = 0, done = 0;
 192	unsigned long flags, rt, master_time_stamp;
 193#if DEBUG_TICK_SYNC
 194	struct {
 195		long rt;	/* roundtrip time */
 196		long master;	/* master's timestamp */
 197		long diff;	/* difference between midpoint and master's timestamp */
 198		long lat;	/* estimate of itc adjustment latency */
 199	} t[NUM_ROUNDS];
 200#endif
 201
 202	go[MASTER] = 1;
 203
 204	while (go[MASTER])
 205		rmb();
 206
 207	local_irq_save(flags);
 208	{
 209		for (i = 0; i < NUM_ROUNDS; i++) {
 210			delta = get_delta(&rt, &master_time_stamp);
 211			if (delta == 0)
 212				done = 1;	/* let's lock on to this... */
 213
 214			if (!done) {
 215				if (i > 0) {
 216					adjust_latency += -delta;
 217					adj = -delta + adjust_latency/4;
 218				} else
 219					adj = -delta;
 220
 221				tick_ops->add_tick(adj);
 222			}
 223#if DEBUG_TICK_SYNC
 224			t[i].rt = rt;
 225			t[i].master = master_time_stamp;
 226			t[i].diff = delta;
 227			t[i].lat = adjust_latency/4;
 228#endif
 229		}
 230	}
 231	local_irq_restore(flags);
 232
 233#if DEBUG_TICK_SYNC
 234	for (i = 0; i < NUM_ROUNDS; i++)
 235		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
 236		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
 237#endif
 238
 239	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
 240	       "(last diff %ld cycles, maxerr %lu cycles)\n",
 241	       smp_processor_id(), delta, rt);
 242}
 243
 244static void smp_start_sync_tick_client(int cpu);
 245
 246static void smp_synchronize_one_tick(int cpu)
 247{
 248	unsigned long flags, i;
 249
 250	go[MASTER] = 0;
 251
 252	smp_start_sync_tick_client(cpu);
 253
 254	/* wait for client to be ready */
 255	while (!go[MASTER])
 256		rmb();
 257
 258	/* now let the client proceed into his loop */
 259	go[MASTER] = 0;
 260	membar_safe("#StoreLoad");
 261
 262	spin_lock_irqsave(&itc_sync_lock, flags);
 263	{
 264		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
 265			while (!go[MASTER])
 266				rmb();
 267			go[MASTER] = 0;
 268			wmb();
 269			go[SLAVE] = tick_ops->get_tick();
 270			membar_safe("#StoreLoad");
 271		}
 272	}
 273	spin_unlock_irqrestore(&itc_sync_lock, flags);
 274}
 275
 276#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
 277/* XXX Put this in some common place. XXX */
 278static unsigned long kimage_addr_to_ra(void *p)
 279{
 280	unsigned long val = (unsigned long) p;
 281
 282	return kern_base + (val - KERNBASE);
 283}
 284
 285static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
 286{
 287	extern unsigned long sparc64_ttable_tl0;
 288	extern unsigned long kern_locked_tte_data;
 289	struct hvtramp_descr *hdesc;
 290	unsigned long trampoline_ra;
 291	struct trap_per_cpu *tb;
 292	u64 tte_vaddr, tte_data;
 293	unsigned long hv_err;
 294	int i;
 295
 296	hdesc = kzalloc(sizeof(*hdesc) +
 297			(sizeof(struct hvtramp_mapping) *
 298			 num_kernel_image_mappings - 1),
 299			GFP_KERNEL);
 300	if (!hdesc) {
 301		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
 302		       "hvtramp_descr.\n");
 303		return;
 304	}
 305	*descrp = hdesc;
 306
 307	hdesc->cpu = cpu;
 308	hdesc->num_mappings = num_kernel_image_mappings;
 309
 310	tb = &trap_block[cpu];
 311
 312	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
 313	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
 314
 315	hdesc->thread_reg = thread_reg;
 316
 317	tte_vaddr = (unsigned long) KERNBASE;
 318	tte_data = kern_locked_tte_data;
 319
 320	for (i = 0; i < hdesc->num_mappings; i++) {
 321		hdesc->maps[i].vaddr = tte_vaddr;
 322		hdesc->maps[i].tte   = tte_data;
 323		tte_vaddr += 0x400000;
 324		tte_data  += 0x400000;
 325	}
 326
 327	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
 328
 329	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
 330				 kimage_addr_to_ra(&sparc64_ttable_tl0),
 331				 __pa(hdesc));
 332	if (hv_err)
 333		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
 334		       "gives error %lu\n", hv_err);
 335}
 336#endif
 337
 338extern unsigned long sparc64_cpu_startup;
 339
 340/* The OBP cpu startup callback truncates the 3rd arg cookie to
 341	 * 32 bits (I think), so to be safe we have it read the pointer
 342	 * stored here, which lets us work on >4GB machines. -DaveM
 343 */
 344static struct thread_info *cpu_new_thread = NULL;
 345
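/* Boot one secondary cpu: fork its idle task, start it through the
 * LDOM hypervisor or OBP, then poll callin_flag for up to ~5 seconds
 * (50000 * 100us) waiting for the new cpu to call in.
 */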
 346static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
 347{
 348	unsigned long entry =
 349		(unsigned long)(&sparc64_cpu_startup);
 350	unsigned long cookie =
 351		(unsigned long)(&cpu_new_thread);
 352	struct task_struct *p;
 353	void *descr = NULL;
 354	int timeout, ret;
 355
 356	p = fork_idle(cpu);
 357	if (IS_ERR(p))
 358		return PTR_ERR(p);
 359	callin_flag = 0;
 360	cpu_new_thread = task_thread_info(p);
 361
 362	if (tlb_type == hypervisor) {
 363#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
 364		if (ldom_domaining_enabled)
 365			ldom_startcpu_cpuid(cpu,
 366					    (unsigned long) cpu_new_thread,
 367					    &descr);
 368		else
 369#endif
 370			prom_startcpu_cpuid(cpu, entry, cookie);
 371	} else {
 372		struct device_node *dp = of_find_node_by_cpuid(cpu);
 373
 374		prom_startcpu(dp->phandle, entry, cookie);
 375	}
 376
 377	for (timeout = 0; timeout < 50000; timeout++) {
 378		if (callin_flag)
 379			break;
 380		udelay(100);
 381	}
 382
 383	if (callin_flag) {
 384		ret = 0;
 385	} else {
 386		printk("Processor %d is stuck.\n", cpu);
 387		ret = -ENODEV;
 388	}
 389	cpu_new_thread = NULL;
 390
 391	kfree(descr);
 392
 393	return ret;
 394}
 395
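/* Deliver one 3-word mondo to a single cpu through the Spitfire UDB
 * interrupt dispatch registers (ASI_INTR_W), then poll
 * ASI_INTR_DISPATCH_STAT: a clear status means delivery, a NACK causes
 * a short delay and retry, and ~100000 busy polls are treated as a
 * stuck dispatch.
 */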
 396static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
 397{
 398	u64 result, target;
 399	int stuck, tmp;
 400
 401	if (this_is_starfire) {
 402		/* map to real upaid */
 403		cpu = (((cpu & 0x3c) << 1) |
 404			((cpu & 0x40) >> 4) |
 405			(cpu & 0x3));
 406	}
 407
 408	target = (cpu << 14) | 0x70;
 409again:
 410	/* Ok, this is the real Spitfire Errata #54.
 411	 * One must read back from a UDB internal register
 412	 * after writes to the UDB interrupt dispatch, but
 413	 * before the membar Sync for that write.
 414	 * So we use the high UDB control register (ASI 0x7f,
 415	 * ADDR 0x20) for the dummy read. -DaveM
 416	 */
 417	tmp = 0x40;
 418	__asm__ __volatile__(
 419	"wrpr	%1, %2, %%pstate\n\t"
 420	"stxa	%4, [%0] %3\n\t"
 421	"stxa	%5, [%0+%8] %3\n\t"
 422	"add	%0, %8, %0\n\t"
 423	"stxa	%6, [%0+%8] %3\n\t"
 424	"membar	#Sync\n\t"
 425	"stxa	%%g0, [%7] %3\n\t"
 426	"membar	#Sync\n\t"
 427	"mov	0x20, %%g1\n\t"
 428	"ldxa	[%%g1] 0x7f, %%g0\n\t"
 429	"membar	#Sync"
 430	: "=r" (tmp)
 431	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
 432	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
 433	  "r" (0x10), "0" (tmp)
 434        : "g1");
 435
 436	/* NOTE: PSTATE_IE is still clear. */
 437	stuck = 100000;
 438	do {
 439		__asm__ __volatile__("ldxa [%%g0] %1, %0"
 440			: "=r" (result)
 441			: "i" (ASI_INTR_DISPATCH_STAT));
 442		if (result == 0) {
 443			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 444					     : : "r" (pstate));
 445			return;
 446		}
 447		stuck -= 1;
 448		if (stuck == 0)
 449			break;
 450	} while (result & 0x1);
 451	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 452			     : : "r" (pstate));
 453	if (stuck == 0) {
 454		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
 455		       smp_processor_id(), result);
 456	} else {
 457		udelay(2);
 458		goto again;
 459	}
 460}
 461
 462static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 463{
 464	u64 *mondo, data0, data1, data2;
 465	u16 *cpu_list;
 466	u64 pstate;
 467	int i;
 468
 469	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 470	cpu_list = __va(tb->cpu_list_pa);
 471	mondo = __va(tb->cpu_mondo_block_pa);
 472	data0 = mondo[0];
 473	data1 = mondo[1];
 474	data2 = mondo[2];
 475	for (i = 0; i < cnt; i++)
 476		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
 477}
 478
 479	/* Cheetah now allows us to send the whole 64 bytes of data in the interrupt
 480 * packet, but we have no use for that.  However we do take advantage of
 481 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 482 */
 483static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 484{
 485	int nack_busy_id, is_jbus, need_more;
 486	u64 *mondo, pstate, ver, busy_mask;
 487	u16 *cpu_list;
 488
 489	cpu_list = __va(tb->cpu_list_pa);
 490	mondo = __va(tb->cpu_mondo_block_pa);
 491
 492	/* Unfortunately, someone at Sun had the brilliant idea to make the
 493	 * busy/nack fields hard-coded by ITID number for this Ultra-III
 494	 * derivative processor.
 495	 */
 496	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
 497	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
 498		   (ver >> 32) == __SERRANO_ID);
 499
 500	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 501
 502retry:
 503	need_more = 0;
 504	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
 505			     : : "r" (pstate), "i" (PSTATE_IE));
 506
 507	/* Setup the dispatch data registers. */
 508	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
 509			     "stxa	%1, [%4] %6\n\t"
 510			     "stxa	%2, [%5] %6\n\t"
 511			     "membar	#Sync\n\t"
 512			     : /* no outputs */
 513			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
 514			       "r" (0x40), "r" (0x50), "r" (0x60),
 515			       "i" (ASI_INTR_W));
 516
 517	nack_busy_id = 0;
 518	busy_mask = 0;
 519	{
 520		int i;
 521
 522		for (i = 0; i < cnt; i++) {
 523			u64 target, nr;
 524
 525			nr = cpu_list[i];
 526			if (nr == 0xffff)
 527				continue;
 528
 529			target = (nr << 14) | 0x70;
 530			if (is_jbus) {
 531				busy_mask |= (0x1UL << (nr * 2));
 532			} else {
 533				target |= (nack_busy_id << 24);
 534				busy_mask |= (0x1UL <<
 535					      (nack_busy_id * 2));
 536			}
 537			__asm__ __volatile__(
 538				"stxa	%%g0, [%0] %1\n\t"
 539				"membar	#Sync\n\t"
 540				: /* no outputs */
 541				: "r" (target), "i" (ASI_INTR_W));
 542			nack_busy_id++;
 543			if (nack_busy_id == 32) {
 544				need_more = 1;
 545				break;
 546			}
 547		}
 548	}
 549
 550	/* Now, poll for completion. */
 551	{
 552		u64 dispatch_stat, nack_mask;
 553		long stuck;
 554
 555		stuck = 100000 * nack_busy_id;
 556		nack_mask = busy_mask << 1;
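		/* Each dispatched target owns a bit pair in the dispatch
		 * status register: bit 2n is BUSY and bit 2n+1 is NACK,
		 * indexed by ITID on JBUS parts and by dispatch slot
		 * otherwise, hence nack_mask = busy_mask << 1.
		 */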
 557		do {
 558			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
 559					     : "=r" (dispatch_stat)
 560					     : "i" (ASI_INTR_DISPATCH_STAT));
 561			if (!(dispatch_stat & (busy_mask | nack_mask))) {
 562				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 563						     : : "r" (pstate));
 564				if (unlikely(need_more)) {
 565					int i, this_cnt = 0;
 566					for (i = 0; i < cnt; i++) {
 567						if (cpu_list[i] == 0xffff)
 568							continue;
 569						cpu_list[i] = 0xffff;
 570						this_cnt++;
 571						if (this_cnt == 32)
 572							break;
 573					}
 574					goto retry;
 575				}
 576				return;
 577			}
 578			if (!--stuck)
 579				break;
 580		} while (dispatch_stat & busy_mask);
 581
 582		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 583				     : : "r" (pstate));
 584
 585		if (dispatch_stat & busy_mask) {
 586			/* Busy bits will not clear, continue instead
 587			 * of freezing up on this cpu.
 588			 */
 589			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
 590			       smp_processor_id(), dispatch_stat);
 591		} else {
 592			int i, this_busy_nack = 0;
 593
 594			/* Delay some random time with interrupts enabled
 595			 * to prevent deadlock.
 596			 */
 597			udelay(2 * nack_busy_id);
 598
 599			/* Clear out the mask bits for cpus which did not
 600			 * NACK us.
 601			 */
 602			for (i = 0; i < cnt; i++) {
 603				u64 check_mask, nr;
 604
 605				nr = cpu_list[i];
 606				if (nr == 0xffff)
 607					continue;
 608
 609				if (is_jbus)
 610					check_mask = (0x2UL << (2*nr));
 611				else
 612					check_mask = (0x2UL <<
 613						      this_busy_nack);
 614				if ((dispatch_stat & check_mask) == 0)
 615					cpu_list[i] = 0xffff;
 616				this_busy_nack += 2;
 617				if (this_busy_nack == 64)
 618					break;
 619			}
 620
 621			goto retry;
 622		}
 623	}
 624}
 625
 626/* Multi-cpu list version.  */
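/* sun4v_cpu_mondo_send() targets every cpu still listed in this cpu's
 * trap block; entries the hypervisor has already delivered are marked
 * 0xffff, so each retry only re-sends to the remainder.  We give up
 * after 10000 retries that make no forward progress.
 */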
 627static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 628{
 629	int retries, this_cpu, prev_sent, i, saw_cpu_error;
 630	unsigned long status;
 631	u16 *cpu_list;
 632
 633	this_cpu = smp_processor_id();
 634
 635	cpu_list = __va(tb->cpu_list_pa);
 636
 637	saw_cpu_error = 0;
 638	retries = 0;
 639	prev_sent = 0;
 640	do {
 641		int forward_progress, n_sent;
 642
 643		status = sun4v_cpu_mondo_send(cnt,
 644					      tb->cpu_list_pa,
 645					      tb->cpu_mondo_block_pa);
 646
 647		/* HV_EOK means all cpus received the xcall, we're done.  */
 648		if (likely(status == HV_EOK))
 649			break;
 650
 651		/* First, see if we made any forward progress.
 652		 *
 653		 * The hypervisor indicates successful sends by setting
 654		 * cpu list entries to the value 0xffff.
 655		 */
 656		n_sent = 0;
 657		for (i = 0; i < cnt; i++) {
 658			if (likely(cpu_list[i] == 0xffff))
 659				n_sent++;
 660		}
 661
 662		forward_progress = 0;
 663		if (n_sent > prev_sent)
 664			forward_progress = 1;
 665
 666		prev_sent = n_sent;
 667
 668		/* If we get a HV_ECPUERROR, then one or more of the cpus
 669		 * in the list are in error state.  Use the cpu_state()
 670		 * hypervisor call to find out which cpus are in error state.
 671		 */
 672		if (unlikely(status == HV_ECPUERROR)) {
 673			for (i = 0; i < cnt; i++) {
 674				long err;
 675				u16 cpu;
 676
 677				cpu = cpu_list[i];
 678				if (cpu == 0xffff)
 679					continue;
 680
 681				err = sun4v_cpu_state(cpu);
 682				if (err == HV_CPU_STATE_ERROR) {
 683					saw_cpu_error = (cpu + 1);
 684					cpu_list[i] = 0xffff;
 685				}
 686			}
 687		} else if (unlikely(status != HV_EWOULDBLOCK))
 688			goto fatal_mondo_error;
 689
 690		/* Don't bother rewriting the CPU list, just leave the
 691		 * 0xffff and non-0xffff entries in there and the
 692		 * hypervisor will do the right thing.
 693		 *
 694		 * Only advance timeout state if we didn't make any
 695		 * forward progress.
 696		 */
 697		if (unlikely(!forward_progress)) {
 698			if (unlikely(++retries > 10000))
 699				goto fatal_mondo_timeout;
 700
 701			/* Delay a little bit to let other cpus catch up
 702			 * on their cpu mondo queue work.
 703			 */
 704			udelay(2 * cnt);
 705		}
 706	} while (1);
 707
 708	if (unlikely(saw_cpu_error))
 709		goto fatal_mondo_cpu_error;
 710
 711	return;
 712
 713fatal_mondo_cpu_error:
 714	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
 715	       "(including %d) were in error state\n",
 716	       this_cpu, saw_cpu_error - 1);
 717	return;
 718
 719fatal_mondo_timeout:
 720	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
 721	       "progress after %d retries.\n",
 722	       this_cpu, retries);
 723	goto dump_cpu_list_and_out;
 724
 725fatal_mondo_error:
 726	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
 727	       this_cpu, status);
 728	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
 729	       "mondo_block_pa(%lx)\n",
 730	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
 731
 732dump_cpu_list_and_out:
 733	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
 734	for (i = 0; i < cnt; i++)
 735		printk("%u ", cpu_list[i]);
 736	printk("]\n");
 737}
 738
 739static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
 740
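/* Common delivery path: stash the three mondo words and the target cpu
 * list in this cpu's trap block, then hand off to the platform-specific
 * routine chosen in smp_setup_processor_id().
 */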
 741static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 742{
 743	struct trap_per_cpu *tb;
 744	int this_cpu, i, cnt;
 745	unsigned long flags;
 746	u16 *cpu_list;
 747	u64 *mondo;
 748
 749	/* We have to do this whole thing with interrupts fully disabled.
 750	 * Otherwise if we send an xcall from interrupt context it will
 751	 * corrupt both our mondo block and cpu list state.
 752	 *
 753	 * One consequence of this is that we cannot use timeout mechanisms
 754	 * that depend upon interrupts being delivered locally.  So, for
 755	 * example, we cannot sample jiffies and expect it to advance.
 756	 *
 757	 * Fortunately, udelay() uses %stick/%tick so we can use that.
 758	 */
 759	local_irq_save(flags);
 760
 761	this_cpu = smp_processor_id();
 762	tb = &trap_block[this_cpu];
 763
 764	mondo = __va(tb->cpu_mondo_block_pa);
 765	mondo[0] = data0;
 766	mondo[1] = data1;
 767	mondo[2] = data2;
 768	wmb();
 769
 770	cpu_list = __va(tb->cpu_list_pa);
 771
 772	/* Setup the initial cpu list.  */
 773	cnt = 0;
 774	for_each_cpu(i, mask) {
 775		if (i == this_cpu || !cpu_online(i))
 776			continue;
 777		cpu_list[cnt++] = i;
 778	}
 779
 780	if (cnt)
 781		xcall_deliver_impl(tb, cnt);
 782
 783	local_irq_restore(flags);
 784}
 785
 786/* Send cross call to all processors mentioned in MASK_P
 787 * except self.  Really, there are only two cases currently,
 788 * "cpu_online_mask" and "mm_cpumask(mm)".
 789 */
 790static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
 791{
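	/* data0 carries the mmu context in its upper 32 bits and the low
	 * 32 bits of the xcall trampoline address in its lower half.
	 */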
 792	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
 793
 794	xcall_deliver(data0, data1, data2, mask);
 795}
 796
 797/* Send cross call to all processors except self. */
 798static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
 799{
 800	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
 801}
 802
 803extern unsigned long xcall_sync_tick;
 804
 805static void smp_start_sync_tick_client(int cpu)
 806{
 807	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
 808		      cpumask_of(cpu));
 809}
 810
 811extern unsigned long xcall_call_function;
 812
 813void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 814{
 815	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
 816}
 817
 818extern unsigned long xcall_call_function_single;
 819
 820void arch_send_call_function_single_ipi(int cpu)
 821{
 822	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
 823		      cpumask_of(cpu));
 824}
 825
 826void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 827{
 828	clear_softint(1 << irq);
 829	generic_smp_call_function_interrupt();
 830}
 831
 832void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 833{
 834	clear_softint(1 << irq);
 835	generic_smp_call_function_single_interrupt();
 836}
 837
 838static void tsb_sync(void *info)
 839{
 840	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
 841	struct mm_struct *mm = info;
 842
 843	/* It is not valid to test "current->active_mm == mm" here.
 844	 *
 845	 * The value of "current" is not changed atomically with
 846	 * switch_mm().  But that's OK, we just need to check the
 847	 * current cpu's trap block PGD physical address.
 848	 */
 849	if (tp->pgd_paddr == __pa(mm->pgd))
 850		tsb_context_switch(mm);
 851}
 852
 853void smp_tsb_sync(struct mm_struct *mm)
 854{
 855	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
 856}
 857
 858extern unsigned long xcall_flush_tlb_mm;
 859extern unsigned long xcall_flush_tlb_pending;
 860extern unsigned long xcall_flush_tlb_kernel_range;
 861extern unsigned long xcall_fetch_glob_regs;
 862extern unsigned long xcall_receive_signal;
 863extern unsigned long xcall_new_mmu_context_version;
 864#ifdef CONFIG_KGDB
 865extern unsigned long xcall_kgdb_capture;
 866#endif
 867
 868#ifdef DCACHE_ALIASING_POSSIBLE
 869extern unsigned long xcall_flush_dcache_page_cheetah;
 870#endif
 871extern unsigned long xcall_flush_dcache_page_spitfire;
 872
 873#ifdef CONFIG_DEBUG_DCFLUSH
 874extern atomic_t dcpage_flushes;
 875extern atomic_t dcpage_flushes_xcall;
 876#endif
 877
 878static inline void __local_flush_dcache_page(struct page *page)
 879{
 880#ifdef DCACHE_ALIASING_POSSIBLE
 881	__flush_dcache_page(page_address(page),
 882			    ((tlb_type == spitfire) &&
 883			     page_mapping(page) != NULL));
 884#else
 885	if (page_mapping(page) != NULL &&
 886	    tlb_type == spitfire)
 887		__flush_icache_page(__pa(page_address(page)));
 888#endif
 889}
 890
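/* Flush one page from the D-cache of a particular cpu.  On sun4v there
 * is nothing to do; otherwise flush locally or cross-call the owning
 * cpu with the spitfire/cheetah flush stub, with bit 32 of data0
 * flagging a mapped page on spitfire.
 */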
 891void smp_flush_dcache_page_impl(struct page *page, int cpu)
 892{
 893	int this_cpu;
 894
 895	if (tlb_type == hypervisor)
 896		return;
 897
 898#ifdef CONFIG_DEBUG_DCFLUSH
 899	atomic_inc(&dcpage_flushes);
 900#endif
 901
 902	this_cpu = get_cpu();
 903
 904	if (cpu == this_cpu) {
 905		__local_flush_dcache_page(page);
 906	} else if (cpu_online(cpu)) {
 907		void *pg_addr = page_address(page);
 908		u64 data0 = 0;
 909
 910		if (tlb_type == spitfire) {
 911			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
 912			if (page_mapping(page) != NULL)
 913				data0 |= ((u64)1 << 32);
 914		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 915#ifdef DCACHE_ALIASING_POSSIBLE
 916			data0 =	((u64)&xcall_flush_dcache_page_cheetah);
 917#endif
 918		}
 919		if (data0) {
 920			xcall_deliver(data0, __pa(pg_addr),
 921				      (u64) pg_addr, cpumask_of(cpu));
 922#ifdef CONFIG_DEBUG_DCFLUSH
 923			atomic_inc(&dcpage_flushes_xcall);
 924#endif
 925		}
 926	}
 927
 928	put_cpu();
 929}
 930
 931void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 932{
 933	void *pg_addr;
 934	u64 data0;
 935
 936	if (tlb_type == hypervisor)
 937		return;
 938
 939	preempt_disable();
 940
 941#ifdef CONFIG_DEBUG_DCFLUSH
 942	atomic_inc(&dcpage_flushes);
 943#endif
 944	data0 = 0;
 945	pg_addr = page_address(page);
 946	if (tlb_type == spitfire) {
 947		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
 948		if (page_mapping(page) != NULL)
 949			data0 |= ((u64)1 << 32);
 950	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 951#ifdef DCACHE_ALIASING_POSSIBLE
 952		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
 953#endif
 954	}
 955	if (data0) {
 956		xcall_deliver(data0, __pa(pg_addr),
 957			      (u64) pg_addr, cpu_online_mask);
 958#ifdef CONFIG_DEBUG_DCFLUSH
 959		atomic_inc(&dcpage_flushes_xcall);
 960#endif
 961	}
 962	__local_flush_dcache_page(page);
 963
 964	preempt_enable();
 965}
 966
 967void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 968{
 969	struct mm_struct *mm;
 970	unsigned long flags;
 971
 972	clear_softint(1 << irq);
 973
 974	/* See if we need to allocate a new TLB context because
 975	 * the version of the one we are using is now out of date.
 976	 */
 977	mm = current->active_mm;
 978	if (unlikely(!mm || (mm == &init_mm)))
 979		return;
 980
 981	spin_lock_irqsave(&mm->context.lock, flags);
 982
 983	if (unlikely(!CTX_VALID(mm->context)))
 984		get_new_mmu_context(mm);
 985
 986	spin_unlock_irqrestore(&mm->context.lock, flags);
 987
 988	load_secondary_context(mm);
 989	__flush_tlb_mm(CTX_HWBITS(mm->context),
 990		       SECONDARY_CONTEXT);
 991}
 992
 993void smp_new_mmu_context_version(void)
 994{
 995	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
 996}
 997
 998#ifdef CONFIG_KGDB
 999void kgdb_roundup_cpus(unsigned long flags)
1000{
1001	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
1002}
1003#endif
1004
1005void smp_fetch_global_regs(void)
1006{
1007	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
1008}
1009
1010/* We know that the window frames of the user have been flushed
1011 * to the stack before we get here because all callers of us
1012 * are flush_tlb_*() routines, and these run after flush_cache_*()
1013 * which performs the flushw.
1014 *
1015 * The SMP TLB coherency scheme we use works as follows:
1016 *
1017 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1018 *    space has (potentially) executed on, this is the heuristic
1019 *    we use to avoid doing cross calls.
1020 *
1021 *    Also, for flushing from kswapd and also for clones, we
1022 *    use cpu_vm_mask as the list of cpus to make run the TLB.
1023 *
1024 * 2) TLB context numbers are shared globally across all processors
1025 *    in the system, this allows us to play several games to avoid
1026 *    cross calls.
1027 *
1028 *    One invariant is that when a cpu switches to a process, and
1029	 *    that process's tsk->active_mm->cpu_vm_mask does not have the
1030 *    current cpu's bit set, that tlb context is flushed locally.
1031 *
1032 *    If the address space is non-shared (ie. mm->count == 1) we avoid
1033 *    cross calls when we want to flush the currently running process's
1034 *    tlb state.  This is done by clearing all cpu bits except the current
1035 *    processor's in current->mm->cpu_vm_mask and performing the
1036 *    flush locally only.  This will force any subsequent cpus which run
1037 *    this task to flush the context from the local tlb if the process
1038 *    migrates to another cpu (again).
1039 *
1040 * 3) For shared address spaces (threads) and swapping we bite the
1041 *    bullet for most cases and perform the cross call (but only to
1042 *    the cpus listed in cpu_vm_mask).
1043 *
1044 *    The performance gain from "optimizing" away the cross call for threads is
1045 *    questionable (in theory the big win for threads is the massive sharing of
1046 *    address space state across processors).
1047 */
1048
1049	/* This is currently only used by the hugetlb arch pre-fault
1050 * hook on UltraSPARC-III+ and later when changing the pagesize
1051 * bits of the context register for an address space.
1052 */
1053void smp_flush_tlb_mm(struct mm_struct *mm)
1054{
1055	u32 ctx = CTX_HWBITS(mm->context);
1056	int cpu = get_cpu();
1057
1058	if (atomic_read(&mm->mm_users) == 1) {
1059		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1060		goto local_flush_and_out;
1061	}
1062
1063	smp_cross_call_masked(&xcall_flush_tlb_mm,
1064			      ctx, 0, 0,
1065			      mm_cpumask(mm));
1066
1067local_flush_and_out:
1068	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1069
1070	put_cpu();
1071}
1072
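/* Flush a batch of pending translations.  A single-user address space
 * is trimmed to the local cpu only; otherwise the pending vaddr list is
 * cross-called to every cpu in mm_cpumask() before the local flush.
 */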
1073void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1074{
1075	u32 ctx = CTX_HWBITS(mm->context);
1076	int cpu = get_cpu();
1077
1078	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1079		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1080	else
1081		smp_cross_call_masked(&xcall_flush_tlb_pending,
1082				      ctx, nr, (unsigned long) vaddrs,
1083				      mm_cpumask(mm));
1084
1085	__flush_tlb_pending(ctx, nr, vaddrs);
1086
1087	put_cpu();
1088}
1089
1090void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1091{
1092	start &= PAGE_MASK;
1093	end    = PAGE_ALIGN(end);
1094	if (start != end) {
1095		smp_cross_call(&xcall_flush_tlb_kernel_range,
1096			       0, start, end);
1097
1098		__flush_tlb_kernel_range(start, end);
1099	}
1100}
1101
1102/* CPU capture. */
1103/* #define CAPTURE_DEBUG */
1104extern unsigned long xcall_capture;
1105
1106static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1107static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1108static unsigned long penguins_are_doing_time;
1109
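/* smp_capture() parks every other cpu in smp_penguin_jailcell(), e.g.
 * while the PROM is entered; the depth counter lets capture/release
 * pairs nest, and smp_release() sets the captives loose again.
 */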
1110void smp_capture(void)
1111{
1112	int result = atomic_add_ret(1, &smp_capture_depth);
1113
1114	if (result == 1) {
1115		int ncpus = num_online_cpus();
1116
1117#ifdef CAPTURE_DEBUG
1118		printk("CPU[%d]: Sending penguins to jail...",
1119		       smp_processor_id());
1120#endif
1121		penguins_are_doing_time = 1;
1122		atomic_inc(&smp_capture_registry);
1123		smp_cross_call(&xcall_capture, 0, 0, 0);
1124		while (atomic_read(&smp_capture_registry) != ncpus)
1125			rmb();
1126#ifdef CAPTURE_DEBUG
1127		printk("done\n");
1128#endif
1129	}
1130}
1131
1132void smp_release(void)
1133{
1134	if (atomic_dec_and_test(&smp_capture_depth)) {
1135#ifdef CAPTURE_DEBUG
1136		printk("CPU[%d]: Giving pardon to "
1137		       "imprisoned penguins\n",
1138		       smp_processor_id());
1139#endif
1140		penguins_are_doing_time = 0;
1141		membar_safe("#StoreLoad");
1142		atomic_dec(&smp_capture_registry);
1143	}
1144}
1145
1146/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
1147 * set, so they can service tlb flush xcalls...
1148 */
1149extern void prom_world(int);
1150
1151void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
1152{
1153	clear_softint(1 << irq);
1154
1155	preempt_disable();
1156
1157	__asm__ __volatile__("flushw");
1158	prom_world(1);
1159	atomic_inc(&smp_capture_registry);
1160	membar_safe("#StoreLoad");
1161	while (penguins_are_doing_time)
1162		rmb();
1163	atomic_dec(&smp_capture_registry);
1164	prom_world(0);
1165
1166	preempt_enable();
1167}
1168
1169/* /proc/profile writes can call this, don't __init it please. */
1170int setup_profiling_timer(unsigned int multiplier)
1171{
1172	return -EINVAL;
1173}
1174
1175void __init smp_prepare_cpus(unsigned int max_cpus)
1176{
1177}
1178
1179void __devinit smp_prepare_boot_cpu(void)
1180{
1181}
1182
1183void __init smp_setup_processor_id(void)
1184{
1185	if (tlb_type == spitfire)
1186		xcall_deliver_impl = spitfire_xcall_deliver;
1187	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
1188		xcall_deliver_impl = cheetah_xcall_deliver;
1189	else
1190		xcall_deliver_impl = hypervisor_xcall_deliver;
1191}
1192
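/* Rebuild cpu_core_map and cpu_sibling_map from the per-cpu core_id
 * and proc_id values: cpus sharing a core_id are core siblings, cpus
 * sharing a proc_id are hardware-thread siblings, and cpus without a
 * known id get singleton masks.
 */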
1193void __devinit smp_fill_in_sib_core_maps(void)
1194{
1195	unsigned int i;
1196
1197	for_each_present_cpu(i) {
1198		unsigned int j;
1199
1200		cpumask_clear(&cpu_core_map[i]);
1201		if (cpu_data(i).core_id == 0) {
1202			cpumask_set_cpu(i, &cpu_core_map[i]);
1203			continue;
1204		}
1205
1206		for_each_present_cpu(j) {
1207			if (cpu_data(i).core_id ==
1208			    cpu_data(j).core_id)
1209				cpumask_set_cpu(j, &cpu_core_map[i]);
1210		}
1211	}
1212
1213	for_each_present_cpu(i) {
1214		unsigned int j;
1215
1216		cpumask_clear(&per_cpu(cpu_sibling_map, i));
1217		if (cpu_data(i).proc_id == -1) {
1218			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
1219			continue;
1220		}
1221
1222		for_each_present_cpu(j) {
1223			if (cpu_data(i).proc_id ==
1224			    cpu_data(j).proc_id)
1225				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
1226		}
1227	}
1228}
1229
1230int __cpuinit __cpu_up(unsigned int cpu)
1231{
1232	int ret = smp_boot_one_cpu(cpu);
1233
1234	if (!ret) {
1235		cpumask_set_cpu(cpu, &smp_commenced_mask);
1236		while (!cpu_online(cpu))
1237			mb();
1238		if (!cpu_online(cpu)) {
1239			ret = -ENODEV;
1240		} else {
1241			/* On SUN4V, writes to %tick and %stick are
1242			 * not allowed.
1243			 */
1244			if (tlb_type != hypervisor)
1245				smp_synchronize_one_tick(cpu);
1246		}
1247	}
1248	return ret;
1249}
1250
1251#ifdef CONFIG_HOTPLUG_CPU
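/* Final resting place of an offlined cpu: unconfigure the sun4v mondo
 * and error queues, drop out of smp_commenced_mask, mask interrupts and
 * spin; on LDOM guests __cpu_die() then stops the cpu with
 * sun4v_cpu_stop().
 */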
1252void cpu_play_dead(void)
1253{
1254	int cpu = smp_processor_id();
1255	unsigned long pstate;
1256
1257	idle_task_exit();
1258
1259	if (tlb_type == hypervisor) {
1260		struct trap_per_cpu *tb = &trap_block[cpu];
1261
1262		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1263				tb->cpu_mondo_pa, 0);
1264		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1265				tb->dev_mondo_pa, 0);
1266		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1267				tb->resum_mondo_pa, 0);
1268		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1269				tb->nonresum_mondo_pa, 0);
1270	}
1271
1272	cpumask_clear_cpu(cpu, &smp_commenced_mask);
1273	membar_safe("#Sync");
1274
1275	local_irq_disable();
1276
1277	__asm__ __volatile__(
1278		"rdpr	%%pstate, %0\n\t"
1279		"wrpr	%0, %1, %%pstate"
1280		: "=r" (pstate)
1281		: "i" (PSTATE_IE));
1282
1283	while (1)
1284		barrier();
1285}
1286
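/* Tear a cpu down for hotplug removal: drop it from the core and
 * sibling maps, steer interrupts away with fixup_irqs(), mark it
 * offline and rebuild the cpu distribution map (cpu_map_rebuild()).
 */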
1287int __cpu_disable(void)
1288{
1289	int cpu = smp_processor_id();
1290	cpuinfo_sparc *c;
1291	int i;
1292
1293	for_each_cpu(i, &cpu_core_map[cpu])
1294		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
1295	cpumask_clear(&cpu_core_map[cpu]);
1296
1297	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
1298		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
1299	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
1300
1301	c = &cpu_data(cpu);
1302
1303	c->core_id = 0;
1304	c->proc_id = -1;
1305
1306	smp_wmb();
1307
1308	/* Make sure no interrupts point to this cpu.  */
1309	fixup_irqs();
1310
1311	local_irq_enable();
1312	mdelay(1);
1313	local_irq_disable();
1314
1315	ipi_call_lock();
1316	set_cpu_online(cpu, false);
1317	ipi_call_unlock();
1318
1319	cpu_map_rebuild();
1320
1321	return 0;
1322}
1323
1324void __cpu_die(unsigned int cpu)
1325{
1326	int i;
1327
1328	for (i = 0; i < 100; i++) {
1329		smp_rmb();
1330		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
1331			break;
1332		msleep(100);
1333	}
1334	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
1335		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1336	} else {
1337#if defined(CONFIG_SUN_LDOMS)
1338		unsigned long hv_err;
1339		int limit = 100;
1340
1341		do {
1342			hv_err = sun4v_cpu_stop(cpu);
1343			if (hv_err == HV_EOK) {
1344				set_cpu_present(cpu, false);
1345				break;
1346			}
1347		} while (--limit > 0);
1348		if (limit <= 0) {
1349			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
1350			       hv_err);
1351		}
1352#endif
1353	}
1354}
1355#endif
1356
1357void __init smp_cpus_done(unsigned int max_cpus)
1358{
1359	pcr_arch_init();
1360}
1361
1362void smp_send_reschedule(int cpu)
1363{
1364	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
1365		      cpumask_of(cpu));
1366}
1367
1368void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1369{
1370	clear_softint(1 << irq);
1371	scheduler_ipi();
1372}
1373
1374/* This is a nop because we capture all other cpus
1375	 * anyway when making the PROM active.
1376 */
1377void smp_send_stop(void)
1378{
1379}
1380
1381/**
1382 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
1383 * @cpu: cpu to allocate for
1384 * @size: size allocation in bytes
1385 * @align: alignment
1386 *
1387 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
1388 * does the right thing for NUMA regardless of the current
1389 * configuration.
1390 *
1391 * RETURNS:
1392 * Pointer to the allocated area on success, NULL on failure.
1393 */
1394static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
1395					size_t align)
1396{
1397	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
1398#ifdef CONFIG_NEED_MULTIPLE_NODES
1399	int node = cpu_to_node(cpu);
1400	void *ptr;
1401
1402	if (!node_online(node) || !NODE_DATA(node)) {
1403		ptr = __alloc_bootmem(size, align, goal);
1404		pr_info("cpu %d has no node %d or node-local memory\n",
1405			cpu, node);
1406		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
1407			 cpu, size, __pa(ptr));
1408	} else {
1409		ptr = __alloc_bootmem_node(NODE_DATA(node),
1410					   size, align, goal);
1411		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
1412			 "%016lx\n", cpu, size, node, __pa(ptr));
1413	}
1414	return ptr;
1415#else
1416	return __alloc_bootmem(size, align, goal);
1417#endif
1418}
1419
1420static void __init pcpu_free_bootmem(void *ptr, size_t size)
1421{
1422	free_bootmem(__pa(ptr), size);
1423}
1424
1425static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
1426{
1427	if (cpu_to_node(from) == cpu_to_node(to))
1428		return LOCAL_DISTANCE;
1429	else
1430		return REMOTE_DISTANCE;
1431}
1432
1433static void __init pcpu_populate_pte(unsigned long addr)
1434{
1435	pgd_t *pgd = pgd_offset_k(addr);
1436	pud_t *pud;
1437	pmd_t *pmd;
1438
1439	pud = pud_offset(pgd, addr);
1440	if (pud_none(*pud)) {
1441		pmd_t *new;
1442
1443		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1444		pud_populate(&init_mm, pud, new);
1445	}
1446
1447	pmd = pmd_offset(pud, addr);
1448	if (!pmd_present(*pmd)) {
1449		pte_t *new;
1450
1451		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1452		pmd_populate_kernel(&init_mm, pmd, new);
1453	}
1454}
1455
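/* Carve out the per-cpu areas: try the embed first-chunk allocator
 * (4MB atom size, NUMA-aware distance and bootmem callbacks) and fall
 * back to the page allocator on failure, then record each cpu's offset
 * and load the boot cpu's %g5 base.
 */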
1456void __init setup_per_cpu_areas(void)
1457{
1458	unsigned long delta;
1459	unsigned int cpu;
1460	int rc = -EINVAL;
1461
1462	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
1463		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1464					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
1465					    pcpu_cpu_distance,
1466					    pcpu_alloc_bootmem,
1467					    pcpu_free_bootmem);
1468		if (rc)
1469			pr_warning("PERCPU: %s allocator failed (%d), "
1470				   "falling back to page size\n",
1471				   pcpu_fc_names[pcpu_chosen_fc], rc);
1472	}
1473	if (rc < 0)
1474		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
1475					   pcpu_alloc_bootmem,
1476					   pcpu_free_bootmem,
1477					   pcpu_populate_pte);
1478	if (rc < 0)
1479		panic("cannot initialize percpu area (err=%d)", rc);
1480
1481	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1482	for_each_possible_cpu(cpu)
1483		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
1484
1485	/* Setup %g5 for the boot cpu.  */
1486	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1487
1488	of_fill_in_cpu_data();
1489	if (tlb_type == hypervisor)
1490		mdesc_fill_in_cpu_data(cpu_all_mask);
1491}