// SPDX-License-Identifier: GPL-2.0
/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/kgdb.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/setup.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"
#include "kernel.h"

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS - 1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
EXPORT_SYMBOL(cpu_core_sib_cache_map);

static cpumask_t smp_commenced_mask;

static DEFINE_PER_CPU(bool, poke);
static bool cpu_poke;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
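
/* A worked example of the overflow-safe midpoint computed above: with
 * best_t0 = 7 and best_t1 = 9, 7/2 + 9/2 = 3 + 4 = 7, and since both
 * remainders are 1 the "% 2" test fires and tcenter becomes 8, the
 * exact midpoint.  Summing first as (best_t0 + best_t1) / 2 could wrap
 * around for tick values near ~0UL.
 */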

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	raw_spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}
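
/* In summary, the go[] handshake works like this: the client raises
 * go[MASTER] when it is ready, the master answers each round by
 * clearing go[MASTER] and publishing its %tick in go[SLAVE], and the
 * client (in get_delta() above) timestamps both ends of the exchange
 * so the master's clock can be bounded within one round trip.
 */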

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
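
/* Each hvtramp_mapping entry built above advances both the virtual
 * address and the TTE data by 0x400000, i.e. one locked 4MB kernel
 * mapping per entry, so the hypervisor trampoline can install the
 * kernel image mappings before entering the new cpu's thread.
 */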
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}
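
/* The callin poll above waits up to 50000 iterations of udelay(100),
 * i.e. roughly five seconds, for the new cpu to set callin_flag
 * before giving up and declaring it stuck.
 */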

static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

/* Cheetah now allows sending the whole 64 bytes of data in the
 * interrupt packet, but we have no use for that.  However, we do take
 * advantage of the new pipelining feature (i.e. dispatch to multiple
 * cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}
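
/* The dispatch status register layout assumed above: each dispatch
 * slot owns a bit pair, with bit (2*id) the busy bit collected in
 * busy_mask and bit (2*id + 1) the NACK bit (hence nack_mask =
 * busy_mask << 1).  On JBus parts (Jalapeno/Serrano) the pair is
 * indexed by the target's ITID rather than by the dispatch slot.
 */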

#define	CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
#define	MONDO_USEC_WAIT_MIN		2
#define	MONDO_USEC_WAIT_MAX		100
#define	MONDO_RETRY_LIMIT		500000

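/* The inter-retry wait used below scales with the target count and is
 * then clamped: usec_wait = cnt * MONDO_USEC_WAIT_MIN, capped at
 * MONDO_USEC_WAIT_MAX.  E.g. 1 cpu waits 2 usec per retry, 10 cpus
 * wait 20 usec, and anything past 50 cpus is capped at 100 usec.
 */
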
/* Multi-cpu list version.
 *
 * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
 * Sometimes not all cpus receive the mondo, requiring us to re-send
 * it until all cpus have received it, or until the remaining cpus are
 * truly stuck unable to receive the mondo and we time out.
 * Occasionally a target cpu strand is borrowed briefly by the
 * hypervisor to perform a guest service, such as PCIe error handling.
 * Considering that service time, an overall wait of 1 second is
 * reasonable for 1 cpu.
 * Two in-between mondo check wait times are defined: 2 usec for a
 * single cpu's quick turnaround, and up to 100 usec for a large cpu
 * count.  Delivering a mondo to a large number of cpus can take
 * longer, so we reset the retry count as long as the target cpus are
 * making forward progress.
 */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int this_cpu, tot_cpus, prev_sent, i, rem;
	int usec_wait, retries, tot_retries;
	u16 first_cpu = 0xffff;
	unsigned long xc_rcvd = 0;
	unsigned long status;
	int ecpuerror_id = 0;
	int enocpu_id = 0;
	u16 *cpu_list;
	u16 cpu;

	this_cpu = smp_processor_id();
	cpu_list = __va(tb->cpu_list_pa);
	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
	if (usec_wait > MONDO_USEC_WAIT_MAX)
		usec_wait = MONDO_USEC_WAIT_MAX;
	retries = tot_retries = 0;
	tot_cpus = cnt;
	prev_sent = 0;

	do {
		int n_sent, mondo_delivered, target_cpu_busy;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			goto xcall_done;

		/* If the error is not one of these non-fatal errors, panic */
		if (unlikely((status != HV_EWOULDBLOCK) &&
			(status != HV_ECPUERROR) &&
			(status != HV_ENOCPU)))
			goto fatal_errors;

		/* First, see if we made any forward progress.
		 *
		 * Go through the cpu_list, count the target cpus that have
		 * received our mondo (n_sent), and those that did not (rem).
		 * Re-pack cpu_list with the cpus that remain to be retried
		 * at the front - this simplifies tracking the truly stalled
		 * cpus.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 *
		 * EWOULDBLOCK means some target cpus did not receive the
		 * mondo and retrying usually helps.
		 *
		 * ECPUERROR means at least one target cpu is in an error
		 * state; it's usually safe to skip the faulty cpu and retry.
		 *
		 * ENOCPU means one of the target cpus doesn't belong to the
		 * domain; perhaps it was offlined, which is unexpected but
		 * not fatal, and it's okay to skip the offlined cpu.
		 */
		rem = 0;
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			cpu = cpu_list[i];
			if (likely(cpu == 0xffff)) {
				n_sent++;
			} else if ((status == HV_ECPUERROR) &&
				(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
				ecpuerror_id = cpu + 1;
			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
				enocpu_id = cpu + 1;
			} else {
				cpu_list[rem++] = cpu;
			}
		}

		/* No cpu remained, we're done. */
		if (rem == 0)
			break;

		/* Otherwise, update the cpu count for retry. */
		cnt = rem;

		/* Record the overall number of mondos received by the
		 * first of the remaining cpus.
		 */
		if (first_cpu != cpu_list[0]) {
			first_cpu = cpu_list[0];
			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
		}

		/* Was any mondo delivered successfully? */
		mondo_delivered = (n_sent > prev_sent);
		prev_sent = n_sent;

		/* or, was any target cpu busy processing other mondos? */
		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);

		/* Retry count is for no progress. If we're making progress,
		 * reset the retry count.
		 */
		if (likely(mondo_delivered || target_cpu_busy)) {
			tot_retries += retries;
			retries = 0;
		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
			goto fatal_mondo_timeout;
		}

		/* Delay a little bit to let other cpus catch up on
		 * their cpu mondo queue work.
		 */
		if (!mondo_delivered)
			udelay(usec_wait);

		retries++;
	} while (1);

xcall_done:
	if (unlikely(ecpuerror_id > 0)) {
		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
		       this_cpu, ecpuerror_id - 1);
	} else if (unlikely(enocpu_id > 0)) {
		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
		       this_cpu, enocpu_id - 1);
	}
	return;

fatal_errors:
	/* fatal errors include bad alignment, etc */
	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
	       this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
	panic("Unexpected SUN4V mondo error %lu\n", status);

fatal_mondo_timeout:
	/* some cpus being non-responsive to the cpu mondo */
	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
	       this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
	panic("SUN4V mondo timeout panic\n");
}
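
/* Note that "retries" above only accumulates while neither
 * mondo_delivered nor target_cpu_busy is true, so MONDO_RETRY_LIMIT
 * bounds the time spent with no forward progress at all, not the
 * total delivery time for a large cpu list.
 */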

static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}
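
/* The data0 word packs the hardware context in the high 32 bits and
 * the low 32 bits of the xcall handler's address in the low half.
 * As an illustration (the address is made up): ctx = 0x5 with a
 * handler at 0x00404840 packs to data0 = 0x0000000500404840.
 */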

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping_file(page) != NULL));
#else
	if (page_mapping_file(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping_file(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =	((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping_file(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(void)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * mm->cpu_vm_mask is a bit mask of which cpus an address
 * space has (potentially) executed on; this is the heuristic
 * we use to limit cross calls.
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);

	get_cpu();

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;

	get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
			       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);

	get_cpu();

	smp_cross_call_masked(&xcall_flush_tlb_page,
			      context, vaddr, 0,
			      mm_cpumask(mm));

	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}
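
/* Alignment in smp_flush_tlb_kernel_range() above: start is rounded
 * down to a page boundary and end is rounded up.  For example, with
 * 8K pages a request for [0x1234, 0x5678) flushes [0x0, 0x6000); a
 * range whose rounded start and end coincide is skipped entirely.
 */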

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_return(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

void __init smp_fill_in_cpu_possible_map(void)
{
	int possible_cpus = num_possible_cpus();
	int i;

	if (possible_cpus > nr_cpu_ids)
		possible_cpus = nr_cpu_ids;

	for (i = 0; i < possible_cpus; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);
}

void smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		for_each_present_cpu(j) {
			if (cpu_data(i).max_cache_id ==
			    cpu_data(j).max_cache_id)
				cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);

			if (cpu_data(i).sock_id == cpu_data(j).sock_id)
				cpumask_set_cpu(j, &cpu_core_sib_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu.  */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static void send_cpu_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_receive_signal,
			0, 0, cpumask_of(cpu));
}

void scheduler_poke(void)
{
	if (!cpu_poke)
		return;

	if (!__this_cpu_read(poke))
		return;

	__this_cpu_write(poke, false);
	set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
}

static unsigned long send_cpu_poke(int cpu)
{
	unsigned long hv_err;

	per_cpu(poke, cpu) = true;
	hv_err = sun4v_cpu_poke(cpu);
	if (hv_err != HV_EOK) {
		per_cpu(poke, cpu) = false;
		pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
				    __func__, hv_err);
	}

	return hv_err;
}
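
/* Poke vs. IPI: send_cpu_poke() marks the target's per-cpu "poke"
 * flag before the hypervisor call so that scheduler_poke() on the
 * woken cpu can consume the flag and raise the softint locally.  If
 * the poke hypercall fails, the flag is cleared again and the caller
 * (smp_send_reschedule() below) falls back to a real IPI.
 */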

void smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
		return;
	}

	/* Use cpu poke to resume idle cpu if supported. */
	if (cpu_poke && idle_cpu(cpu)) {
		unsigned long ret;

		ret = send_cpu_poke(cpu);
		if (ret == HV_EOK)
			return;
	}

	/* Use IPI in following cases:
	 * - cpu poke not supported
	 * - cpu not idle
	 * - send_cpu_poke() returns with error
	 */
	send_cpu_ipi(cpu);
}

void smp_init_cpu_poke(void)
{
	unsigned long major;
	unsigned long minor;
	int ret;

	if (tlb_type != hypervisor)
		return;

	ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
	if (ret) {
		pr_debug("HV_GRP_CORE is not registered\n");
		return;
	}

	if (major == 1 && minor >= 6) {
		/* CPU POKE is registered. */
		cpu_poke = true;
		return;
	}

	pr_debug("CPU_POKE not supported\n");
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

static void stop_this_cpu(void *dummy)
{
	set_cpu_online(smp_processor_id(), false);
	prom_stopself();
}

void smp_send_stop(void)
{
	int cpu;

	if (tlb_type == hypervisor) {
		int this_cpu = smp_processor_id();
#ifdef CONFIG_SERIAL_SUNHV
		sunhv_migrate_hvcons_irq(this_cpu);
#endif
		for_each_online_cpu(cpu) {
			if (cpu == this_cpu)
				continue;

			set_cpu_online(cpu, false);
#ifdef CONFIG_SUN_LDOMS
			if (ldom_domaining_enabled) {
				unsigned long hv_err;
				hv_err = sun4v_cpu_stop(cpu);
				if (hv_err)
					printk(KERN_ERR "sun4v_cpu_stop() "
					       "failed err=%lu\n", hv_err);
			} else
#endif
				prom_stopcpu_cpuid(cpu);
		}
	} else
		smp_call_function(stop_this_cpu, NULL, 0);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return cpu_to_node(cpu);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_cpu_to_node);
		if (rc)
			pr_warn("PERCPU: %s allocator failed (%d), "
				"falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_cpu_to_node);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}
v3.15
 
   1/* smp.c: Sparc64 SMP support.
   2 *
   3 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
   4 */
   5
   6#include <linux/export.h>
   7#include <linux/kernel.h>
   8#include <linux/sched.h>
 
   9#include <linux/mm.h>
  10#include <linux/pagemap.h>
  11#include <linux/threads.h>
  12#include <linux/smp.h>
  13#include <linux/interrupt.h>
  14#include <linux/kernel_stat.h>
  15#include <linux/delay.h>
  16#include <linux/init.h>
  17#include <linux/spinlock.h>
  18#include <linux/fs.h>
  19#include <linux/seq_file.h>
  20#include <linux/cache.h>
  21#include <linux/jiffies.h>
  22#include <linux/profile.h>
  23#include <linux/bootmem.h>
  24#include <linux/vmalloc.h>
  25#include <linux/ftrace.h>
  26#include <linux/cpu.h>
  27#include <linux/slab.h>
 
  28
  29#include <asm/head.h>
  30#include <asm/ptrace.h>
  31#include <linux/atomic.h>
  32#include <asm/tlbflush.h>
  33#include <asm/mmu_context.h>
  34#include <asm/cpudata.h>
  35#include <asm/hvtramp.h>
  36#include <asm/io.h>
  37#include <asm/timer.h>
 
  38
  39#include <asm/irq.h>
  40#include <asm/irq_regs.h>
  41#include <asm/page.h>
  42#include <asm/pgtable.h>
  43#include <asm/oplib.h>
  44#include <asm/uaccess.h>
  45#include <asm/starfire.h>
  46#include <asm/tlb.h>
 
  47#include <asm/sections.h>
  48#include <asm/prom.h>
  49#include <asm/mdesc.h>
  50#include <asm/ldc.h>
  51#include <asm/hypervisor.h>
  52#include <asm/pcr.h>
  53
  54#include "cpumap.h"
 
  55
  56DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
  57cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
  58	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
  59
 
 
 
 
 
 
  60EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  61EXPORT_SYMBOL(cpu_core_map);
 
 
  62
  63static cpumask_t smp_commenced_mask;
  64
 
 
 
  65void smp_info(struct seq_file *m)
  66{
  67	int i;
  68	
  69	seq_printf(m, "State:\n");
  70	for_each_online_cpu(i)
  71		seq_printf(m, "CPU%d:\t\tonline\n", i);
  72}
  73
  74void smp_bogo(struct seq_file *m)
  75{
  76	int i;
  77	
  78	for_each_online_cpu(i)
  79		seq_printf(m,
  80			   "Cpu%dClkTck\t: %016lx\n",
  81			   i, cpu_data(i).clock_tick);
  82}
  83
  84extern void setup_sparc64_timer(void);
  85
  86static volatile unsigned long callin_flag = 0;
  87
  88void smp_callin(void)
  89{
  90	int cpuid = hard_smp_processor_id();
  91
  92	__local_per_cpu_offset = __per_cpu_offset(cpuid);
  93
  94	if (tlb_type == hypervisor)
  95		sun4v_ktsb_register();
  96
  97	__flush_tlb_all();
  98
  99	setup_sparc64_timer();
 100
 101	if (cheetah_pcache_forced_on)
 102		cheetah_enable_pcache();
 103
 104	callin_flag = 1;
 105	__asm__ __volatile__("membar #Sync\n\t"
 106			     "flush  %%g6" : : : "memory");
 107
 108	/* Clear this or we will die instantly when we
 109	 * schedule back to this idler...
 110	 */
 111	current_thread_info()->new_child = 0;
 112
 113	/* Attach to the address space of init_task. */
 114	atomic_inc(&init_mm.mm_count);
 115	current->active_mm = &init_mm;
 116
 117	/* inform the notifiers about the new cpu */
 118	notify_cpu_starting(cpuid);
 119
 120	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
 121		rmb();
 122
 123	set_cpu_online(cpuid, true);
 124
 125	/* idle thread is expected to have preempt disabled */
 126	preempt_disable();
 127
 128	local_irq_enable();
 129
 130	cpu_startup_entry(CPUHP_ONLINE);
 131}
 132
 133void cpu_panic(void)
 134{
 135	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
 136	panic("SMP bolixed\n");
 137}
 138
 139/* This tick register synchronization scheme is taken entirely from
 140 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 141 *
 142 * The only change I've made is to rework it so that the master
 143 * initiates the synchonization instead of the slave. -DaveM
 144 */
 145
 146#define MASTER	0
 147#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))
 148
 149#define NUM_ROUNDS	64	/* magic value */
 150#define NUM_ITERS	5	/* likewise */
 151
 152static DEFINE_RAW_SPINLOCK(itc_sync_lock);
 153static unsigned long go[SLAVE + 1];
 154
 155#define DEBUG_TICK_SYNC	0
 156
 157static inline long get_delta (long *rt, long *master)
 158{
 159	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
 160	unsigned long tcenter, t0, t1, tm;
 161	unsigned long i;
 162
 163	for (i = 0; i < NUM_ITERS; i++) {
 164		t0 = tick_ops->get_tick();
 165		go[MASTER] = 1;
 166		membar_safe("#StoreLoad");
 167		while (!(tm = go[SLAVE]))
 168			rmb();
 169		go[SLAVE] = 0;
 170		wmb();
 171		t1 = tick_ops->get_tick();
 172
 173		if (t1 - t0 < best_t1 - best_t0)
 174			best_t0 = t0, best_t1 = t1, best_tm = tm;
 175	}
 176
 177	*rt = best_t1 - best_t0;
 178	*master = best_tm - best_t0;
 179
 180	/* average best_t0 and best_t1 without overflow: */
 181	tcenter = (best_t0/2 + best_t1/2);
 182	if (best_t0 % 2 + best_t1 % 2 == 2)
 183		tcenter++;
 184	return tcenter - best_tm;
 185}
 186
 187void smp_synchronize_tick_client(void)
 188{
 189	long i, delta, adj, adjust_latency = 0, done = 0;
 190	unsigned long flags, rt, master_time_stamp;
 191#if DEBUG_TICK_SYNC
 192	struct {
 193		long rt;	/* roundtrip time */
 194		long master;	/* master's timestamp */
 195		long diff;	/* difference between midpoint and master's timestamp */
 196		long lat;	/* estimate of itc adjustment latency */
 197	} t[NUM_ROUNDS];
 198#endif
 199
 200	go[MASTER] = 1;
 201
 202	while (go[MASTER])
 203		rmb();
 204
 205	local_irq_save(flags);
 206	{
 207		for (i = 0; i < NUM_ROUNDS; i++) {
 208			delta = get_delta(&rt, &master_time_stamp);
 209			if (delta == 0)
 210				done = 1;	/* let's lock on to this... */
 211
 212			if (!done) {
 213				if (i > 0) {
 214					adjust_latency += -delta;
 215					adj = -delta + adjust_latency/4;
 216				} else
 217					adj = -delta;
 218
 219				tick_ops->add_tick(adj);
 220			}
 221#if DEBUG_TICK_SYNC
 222			t[i].rt = rt;
 223			t[i].master = master_time_stamp;
 224			t[i].diff = delta;
 225			t[i].lat = adjust_latency/4;
 226#endif
 227		}
 228	}
 229	local_irq_restore(flags);
 230
 231#if DEBUG_TICK_SYNC
 232	for (i = 0; i < NUM_ROUNDS; i++)
 233		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
 234		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
 235#endif
 236
 237	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
 238	       "(last diff %ld cycles, maxerr %lu cycles)\n",
 239	       smp_processor_id(), delta, rt);
 240}
 241
 242static void smp_start_sync_tick_client(int cpu);
 243
 244static void smp_synchronize_one_tick(int cpu)
 245{
 246	unsigned long flags, i;
 247
 248	go[MASTER] = 0;
 249
 250	smp_start_sync_tick_client(cpu);
 251
 252	/* wait for client to be ready */
 253	while (!go[MASTER])
 254		rmb();
 255
 256	/* now let the client proceed into his loop */
 257	go[MASTER] = 0;
 258	membar_safe("#StoreLoad");
 259
 260	raw_spin_lock_irqsave(&itc_sync_lock, flags);
 261	{
 262		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
 263			while (!go[MASTER])
 264				rmb();
 265			go[MASTER] = 0;
 266			wmb();
 267			go[SLAVE] = tick_ops->get_tick();
 268			membar_safe("#StoreLoad");
 269		}
 270	}
 271	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
 272}
 273
 274#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
 275/* XXX Put this in some common place. XXX */
 276static unsigned long kimage_addr_to_ra(void *p)
 277{
 278	unsigned long val = (unsigned long) p;
 279
 280	return kern_base + (val - KERNBASE);
 281}
 282
 283static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
 284				void **descrp)
 285{
 286	extern unsigned long sparc64_ttable_tl0;
 287	extern unsigned long kern_locked_tte_data;
 288	struct hvtramp_descr *hdesc;
 289	unsigned long trampoline_ra;
 290	struct trap_per_cpu *tb;
 291	u64 tte_vaddr, tte_data;
 292	unsigned long hv_err;
 293	int i;
 294
 295	hdesc = kzalloc(sizeof(*hdesc) +
 296			(sizeof(struct hvtramp_mapping) *
 297			 num_kernel_image_mappings - 1),
 298			GFP_KERNEL);
 299	if (!hdesc) {
 300		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
 301		       "hvtramp_descr.\n");
 302		return;
 303	}
 304	*descrp = hdesc;
 305
 306	hdesc->cpu = cpu;
 307	hdesc->num_mappings = num_kernel_image_mappings;
 308
 309	tb = &trap_block[cpu];
 310
 311	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
 312	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
 313
 314	hdesc->thread_reg = thread_reg;
 315
 316	tte_vaddr = (unsigned long) KERNBASE;
 317	tte_data = kern_locked_tte_data;
 318
 319	for (i = 0; i < hdesc->num_mappings; i++) {
 320		hdesc->maps[i].vaddr = tte_vaddr;
 321		hdesc->maps[i].tte   = tte_data;
 322		tte_vaddr += 0x400000;
 323		tte_data  += 0x400000;
 324	}
 325
 326	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
 327
 328	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
 329				 kimage_addr_to_ra(&sparc64_ttable_tl0),
 330				 __pa(hdesc));
 331	if (hv_err)
 332		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
 333		       "gives error %lu\n", hv_err);
 334}
 335#endif
 336
 337extern unsigned long sparc64_cpu_startup;
 338
 339/* The OBP cpu startup callback truncates the 3rd arg cookie to
 340 * 32-bits (I think) so to be safe we have it read the pointer
 341 * contained here so we work on >4GB machines. -DaveM
 342 */
 343static struct thread_info *cpu_new_thread = NULL;
 344
 345static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
 346{
 347	unsigned long entry =
 348		(unsigned long)(&sparc64_cpu_startup);
 349	unsigned long cookie =
 350		(unsigned long)(&cpu_new_thread);
 351	void *descr = NULL;
 352	int timeout, ret;
 353
 354	callin_flag = 0;
 355	cpu_new_thread = task_thread_info(idle);
 356
 357	if (tlb_type == hypervisor) {
 358#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
 359		if (ldom_domaining_enabled)
 360			ldom_startcpu_cpuid(cpu,
 361					    (unsigned long) cpu_new_thread,
 362					    &descr);
 363		else
 364#endif
 365			prom_startcpu_cpuid(cpu, entry, cookie);
 366	} else {
 367		struct device_node *dp = of_find_node_by_cpuid(cpu);
 368
 369		prom_startcpu(dp->phandle, entry, cookie);
 370	}
 371
 372	for (timeout = 0; timeout < 50000; timeout++) {
 373		if (callin_flag)
 374			break;
 375		udelay(100);
 376	}
 377
 378	if (callin_flag) {
 379		ret = 0;
 380	} else {
 381		printk("Processor %d is stuck.\n", cpu);
 382		ret = -ENODEV;
 383	}
 384	cpu_new_thread = NULL;
 385
 386	kfree(descr);
 387
 388	return ret;
 389}
 390
 391static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
 392{
 393	u64 result, target;
 394	int stuck, tmp;
 395
 396	if (this_is_starfire) {
 397		/* map to real upaid */
 398		cpu = (((cpu & 0x3c) << 1) |
 399			((cpu & 0x40) >> 4) |
 400			(cpu & 0x3));
 401	}
 402
 403	target = (cpu << 14) | 0x70;
 404again:
 405	/* Ok, this is the real Spitfire Errata #54.
 406	 * One must read back from a UDB internal register
 407	 * after writes to the UDB interrupt dispatch, but
 408	 * before the membar Sync for that write.
 409	 * So we use the high UDB control register (ASI 0x7f,
 410	 * ADDR 0x20) for the dummy read. -DaveM
 411	 */
 412	tmp = 0x40;
 413	__asm__ __volatile__(
 414	"wrpr	%1, %2, %%pstate\n\t"
 415	"stxa	%4, [%0] %3\n\t"
 416	"stxa	%5, [%0+%8] %3\n\t"
 417	"add	%0, %8, %0\n\t"
 418	"stxa	%6, [%0+%8] %3\n\t"
 419	"membar	#Sync\n\t"
 420	"stxa	%%g0, [%7] %3\n\t"
 421	"membar	#Sync\n\t"
 422	"mov	0x20, %%g1\n\t"
 423	"ldxa	[%%g1] 0x7f, %%g0\n\t"
 424	"membar	#Sync"
 425	: "=r" (tmp)
 426	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
 427	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
 428	  "r" (0x10), "0" (tmp)
 429        : "g1");
 430
 431	/* NOTE: PSTATE_IE is still clear. */
 432	stuck = 100000;
 433	do {
 434		__asm__ __volatile__("ldxa [%%g0] %1, %0"
 435			: "=r" (result)
 436			: "i" (ASI_INTR_DISPATCH_STAT));
 437		if (result == 0) {
 438			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 439					     : : "r" (pstate));
 440			return;
 441		}
 442		stuck -= 1;
 443		if (stuck == 0)
 444			break;
 445	} while (result & 0x1);
 446	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 447			     : : "r" (pstate));
 448	if (stuck == 0) {
 449		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
 450		       smp_processor_id(), result);
 451	} else {
 452		udelay(2);
 453		goto again;
 454	}
 455}
 456
 457static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 458{
 459	u64 *mondo, data0, data1, data2;
 460	u16 *cpu_list;
 461	u64 pstate;
 462	int i;
 463
 464	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 465	cpu_list = __va(tb->cpu_list_pa);
 466	mondo = __va(tb->cpu_mondo_block_pa);
 467	data0 = mondo[0];
 468	data1 = mondo[1];
 469	data2 = mondo[2];
 470	for (i = 0; i < cnt; i++)
 471		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
 472}
 473
 474/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 475 * packet, but we have no use for that.  However we do take advantage of
 476 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 477 */
 478static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 479{
 480	int nack_busy_id, is_jbus, need_more;
 481	u64 *mondo, pstate, ver, busy_mask;
 482	u16 *cpu_list;
 483
 484	cpu_list = __va(tb->cpu_list_pa);
 485	mondo = __va(tb->cpu_mondo_block_pa);
 486
 487	/* Unfortunately, someone at Sun had the brilliant idea to make the
 488	 * busy/nack fields hard-coded by ITID number for this Ultra-III
 489	 * derivative processor.
 490	 */
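    	/* The upper half of %ver holds the implementation ID; Jalapeno
    	 * and Serrano are the JBUS-based parts that use the hard-coded
    	 * ITID layout noted above.
    	 */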
 491	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
 492	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
 493		   (ver >> 32) == __SERRANO_ID);
 494
 495	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 496
 497retry:
 498	need_more = 0;
 499	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
 500			     : : "r" (pstate), "i" (PSTATE_IE));
 501
 502	/* Setup the dispatch data registers. */
 503	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
 504			     "stxa	%1, [%4] %6\n\t"
 505			     "stxa	%2, [%5] %6\n\t"
 506			     "membar	#Sync\n\t"
 507			     : /* no outputs */
 508			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
 509			       "r" (0x40), "r" (0x50), "r" (0x60),
 510			       "i" (ASI_INTR_W));
 511
 512	nack_busy_id = 0;
 513	busy_mask = 0;
 514	{
 515		int i;
 516
 517		for (i = 0; i < cnt; i++) {
 518			u64 target, nr;
 519
 520			nr = cpu_list[i];
 521			if (nr == 0xffff)
 522				continue;
 523
 524			target = (nr << 14) | 0x70;
 525			if (is_jbus) {
 526				busy_mask |= (0x1UL << (nr * 2));
 527			} else {
 528				target |= (nack_busy_id << 24);
 529				busy_mask |= (0x1UL <<
 530					      (nack_busy_id * 2));
 531			}
 532			__asm__ __volatile__(
 533				"stxa	%%g0, [%0] %1\n\t"
 534				"membar	#Sync\n\t"
 535				: /* no outputs */
 536				: "r" (target), "i" (ASI_INTR_W));
 537			nack_busy_id++;
 538			if (nack_busy_id == 32) {
 539				need_more = 1;
 540				break;
 541			}
 542		}
 543	}
 544
 545	/* Now, poll for completion. */
 546	{
 547		u64 dispatch_stat, nack_mask;
 548		long stuck;
 549
 550		stuck = 100000 * nack_busy_id;
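    		/* Every dispatched target owns a (busy, nack) bit pair
    		 * in the dispatch status register.  busy_mask holds the
    		 * even (busy) bits, so shifting it left by one selects
    		 * the matching odd (nack) bits.
    		 */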
 551		nack_mask = busy_mask << 1;
 552		do {
 553			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
 554					     : "=r" (dispatch_stat)
 555					     : "i" (ASI_INTR_DISPATCH_STAT));
 556			if (!(dispatch_stat & (busy_mask | nack_mask))) {
 557				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 558						     : : "r" (pstate));
 559				if (unlikely(need_more)) {
 560					int i, this_cnt = 0;
 561					for (i = 0; i < cnt; i++) {
 562						if (cpu_list[i] == 0xffff)
 563							continue;
 564						cpu_list[i] = 0xffff;
 565						this_cnt++;
 566						if (this_cnt == 32)
 567							break;
 568					}
 569					goto retry;
 570				}
 571				return;
 572			}
 573			if (!--stuck)
 574				break;
 575		} while (dispatch_stat & busy_mask);
 576
 577		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 578				     : : "r" (pstate));
 579
 580		if (dispatch_stat & busy_mask) {
 581			/* Busy bits will not clear, continue instead
 582			 * of freezing up on this cpu.
 583			 */
 584			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
 585			       smp_processor_id(), dispatch_stat);
 586		} else {
 587			int i, this_busy_nack = 0;
 588
 589			/* Delay briefly, scaled by the number of targets,
 590			 * with interrupts enabled to prevent deadlock.
 591			 */
 592			udelay(2 * nack_busy_id);
 593
 594			/* Clear out the mask bits for cpus which did not
 595			 * NACK us.
 596			 */
 597			for (i = 0; i < cnt; i++) {
 598				u64 check_mask, nr;
 599
 600				nr = cpu_list[i];
 601				if (nr == 0xffff)
 602					continue;
 603
 604				if (is_jbus)
 605					check_mask = (0x2UL << (2*nr));
 606				else
 607					check_mask = (0x2UL <<
 608						      this_busy_nack);
 609				if ((dispatch_stat & check_mask) == 0)
 610					cpu_list[i] = 0xffff;
 611				this_busy_nack += 2;
 612				if (this_busy_nack == 64)
 613					break;
 614			}
 615
 616			goto retry;
 617		}
 618	}
 619}
 620
 621/* Multi-cpu list version.  */
 622static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 623{
 624	int retries, this_cpu, prev_sent, i, saw_cpu_error;
 625	unsigned long status;
 626	u16 *cpu_list;
 627
 628	this_cpu = smp_processor_id();
 629
 630	cpu_list = __va(tb->cpu_list_pa);
 631
 632	saw_cpu_error = 0;
 633	retries = 0;
 634	prev_sent = 0;
 635	do {
 636		int forward_progress, n_sent;
 637
 638		status = sun4v_cpu_mondo_send(cnt,
 639					      tb->cpu_list_pa,
 640					      tb->cpu_mondo_block_pa);
 641
 642		/* HV_EOK means all cpus received the xcall, we're done.  */
 643		if (likely(status == HV_EOK))
 644			break;
 645
 646		/* First, see if we made any forward progress.
 647		 *
 648		 * The hypervisor indicates successful sends by setting
 649		 * cpu list entries to the value 0xffff.
 650		 */
 651		n_sent = 0;
 652		for (i = 0; i < cnt; i++) {
 653			if (likely(cpu_list[i] == 0xffff))
 654				n_sent++;
 655		}
 656
 657		forward_progress = 0;
 658		if (n_sent > prev_sent)
 659			forward_progress = 1;
 660
 661		prev_sent = n_sent;
 662
 663		/* If we get a HV_ECPUERROR, then one or more of the cpus
 664		 * in the list are in error state.  Use the cpu_state()
 665		 * hypervisor call to find out which cpus are in error state.
 666		 */
 667		if (unlikely(status == HV_ECPUERROR)) {
 668			for (i = 0; i < cnt; i++) {
 669				long err;
 670				u16 cpu;
 671
 672				cpu = cpu_list[i];
 673				if (cpu == 0xffff)
 674					continue;
 675
 676				err = sun4v_cpu_state(cpu);
 677				if (err == HV_CPU_STATE_ERROR) {
 678					saw_cpu_error = (cpu + 1);
 679					cpu_list[i] = 0xffff;
 680				}
 681			}
 682		} else if (unlikely(status != HV_EWOULDBLOCK))
 683			goto fatal_mondo_error;
 684
 685		/* Don't bother rewriting the CPU list, just leave the
 686		 * 0xffff and non-0xffff entries in there and the
 687		 * hypervisor will do the right thing.
 688		 *
 689		 * Only advance timeout state if we didn't make any
 690		 * forward progress.
 691		 */
 692		if (unlikely(!forward_progress)) {
 693			if (unlikely(++retries > 10000))
 694				goto fatal_mondo_timeout;
 695
 696			/* Delay a little bit to let other cpus catch up
 697			 * on their cpu mondo queue work.
 698			 */
 699			udelay(2 * cnt);
 700		}
 701	} while (1);
 702
 703	if (unlikely(saw_cpu_error))
 704		goto fatal_mondo_cpu_error;
 705
 706	return;
 707
 708fatal_mondo_cpu_error:
 709	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
 710	       "(including %d) were in error state\n",
 711	       this_cpu, saw_cpu_error - 1);
 712	return;
 713
 714fatal_mondo_timeout:
 715	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
 716	       "progress after %d retries.\n",
 717	       this_cpu, retries);
 718	goto dump_cpu_list_and_out;
 719
 720fatal_mondo_error:
 721	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
 722	       this_cpu, status);
 723	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
 724	       "mondo_block_pa(%lx)\n",
 725	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
 726
 727dump_cpu_list_and_out:
 728	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
 729	for (i = 0; i < cnt; i++)
 730		printk("%u ", cpu_list[i]);
 731	printk("]\n");
 732}
 733
 734static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
 735
 736static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 737{
 738	struct trap_per_cpu *tb;
 739	int this_cpu, i, cnt;
 740	unsigned long flags;
 741	u16 *cpu_list;
 742	u64 *mondo;
 743
 744	/* We have to do this whole thing with interrupts fully disabled.
 745	 * Otherwise if we send an xcall from interrupt context it will
 746	 * corrupt both our mondo block and cpu list state.
 747	 *
 748	 * One consequence of this is that we cannot use timeout mechanisms
 749	 * that depend upon interrupts being delivered locally.  So, for
 750	 * example, we cannot sample jiffies and expect it to advance.
 751	 *
 752	 * Fortunately, udelay() uses %stick/%tick so we can use that.
 753	 */
 754	local_irq_save(flags);
 755
 756	this_cpu = smp_processor_id();
 757	tb = &trap_block[this_cpu];
 758
 759	mondo = __va(tb->cpu_mondo_block_pa);
 760	mondo[0] = data0;
 761	mondo[1] = data1;
 762	mondo[2] = data2;
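    	/* Make the mondo word stores visible before any dispatch lets
    	 * a target cpu read them.
    	 */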
 763	wmb();
 764
 765	cpu_list = __va(tb->cpu_list_pa);
 766
 767	/* Setup the initial cpu list.  */
 768	cnt = 0;
 769	for_each_cpu(i, mask) {
 770		if (i == this_cpu || !cpu_online(i))
 771			continue;
 772		cpu_list[cnt++] = i;
 773	}
 774
 775	if (cnt)
 776		xcall_deliver_impl(tb, cnt);
 777
 778	local_irq_restore(flags);
 779}
 780
 781/* Send cross call to all processors mentioned in MASK
 782 * except self.  Really, there are only two cases currently,
 783 * "cpu_online_mask" and "mm_cpumask(mm)".
 784 */
 785static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
 786{
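    	/* Pack the call into a single word: TLB context in the upper
    	 * 32 bits, the xcall handler's kernel address in the lower 32.
    	 */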
 787	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
 788
 789	xcall_deliver(data0, data1, data2, mask);
 790}
 791
 792/* Send cross call to all processors except self. */
 793static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
 794{
 795	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
 796}
 797
 798extern unsigned long xcall_sync_tick;
 799
 800static void smp_start_sync_tick_client(int cpu)
 801{
 802	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
 803		      cpumask_of(cpu));
 804}
 805
 806extern unsigned long xcall_call_function;
 807
 808void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 809{
 810	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
 811}
 812
 813extern unsigned long xcall_call_function_single;
 814
 815void arch_send_call_function_single_ipi(int cpu)
 816{
 817	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
 818		      cpumask_of(cpu));
 819}
 820
 821void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 822{
 823	clear_softint(1 << irq);
 824	generic_smp_call_function_interrupt();
 825}
 826
 827void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 828{
 829	clear_softint(1 << irq);
 830	generic_smp_call_function_single_interrupt();
 831}
 832
 833static void tsb_sync(void *info)
 834{
 835	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
 836	struct mm_struct *mm = info;
 837
 838	/* It is not valid to test "current->active_mm == mm" here.
 839	 *
 840	 * The value of "current" is not changed atomically with
 841	 * switch_mm().  But that's OK, we just need to check the
 842	 * current cpu's trap block PGD physical address.
 843	 */
 844	if (tp->pgd_paddr == __pa(mm->pgd))
 845		tsb_context_switch(mm);
 846}
 847
 848void smp_tsb_sync(struct mm_struct *mm)
 849{
 850	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
 851}
 852
 853extern unsigned long xcall_flush_tlb_mm;
 854extern unsigned long xcall_flush_tlb_page;
 855extern unsigned long xcall_flush_tlb_kernel_range;
 856extern unsigned long xcall_fetch_glob_regs;
 857extern unsigned long xcall_fetch_glob_pmu;
 858extern unsigned long xcall_fetch_glob_pmu_n4;
 859extern unsigned long xcall_receive_signal;
 860extern unsigned long xcall_new_mmu_context_version;
 861#ifdef CONFIG_KGDB
 862extern unsigned long xcall_kgdb_capture;
 863#endif
 864
 865#ifdef DCACHE_ALIASING_POSSIBLE
 866extern unsigned long xcall_flush_dcache_page_cheetah;
 867#endif
 868extern unsigned long xcall_flush_dcache_page_spitfire;
 869
 870#ifdef CONFIG_DEBUG_DCFLUSH
 871extern atomic_t dcpage_flushes;
 872extern atomic_t dcpage_flushes_xcall;
 873#endif
 874
 875static inline void __local_flush_dcache_page(struct page *page)
 876{
 877#ifdef DCACHE_ALIASING_POSSIBLE
 878	__flush_dcache_page(page_address(page),
 879			    ((tlb_type == spitfire) &&
 880			     page_mapping(page) != NULL));
 881#else
 882	if (page_mapping(page) != NULL &&
 883	    tlb_type == spitfire)
 884		__flush_icache_page(__pa(page_address(page)));
 885#endif
 886}
 887
 888void smp_flush_dcache_page_impl(struct page *page, int cpu)
 889{
 890	int this_cpu;
 891
 892	if (tlb_type == hypervisor)
 893		return;
 894
 895#ifdef CONFIG_DEBUG_DCFLUSH
 896	atomic_inc(&dcpage_flushes);
 897#endif
 898
 899	this_cpu = get_cpu();
 900
 901	if (cpu == this_cpu) {
 902		__local_flush_dcache_page(page);
 903	} else if (cpu_online(cpu)) {
 904		void *pg_addr = page_address(page);
 905		u64 data0 = 0;
 906
 907		if (tlb_type == spitfire) {
 908			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
 909			if (page_mapping(page) != NULL)
 910				data0 |= ((u64)1 << 32);
 911		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 912#ifdef DCACHE_ALIASING_POSSIBLE
 913			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
 914#endif
 915		}
 916		if (data0) {
 917			xcall_deliver(data0, __pa(pg_addr),
 918				      (u64) pg_addr, cpumask_of(cpu));
 919#ifdef CONFIG_DEBUG_DCFLUSH
 920			atomic_inc(&dcpage_flushes_xcall);
 921#endif
 922		}
 923	}
 924
 925	put_cpu();
 926}
 927
 928void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 929{
 930	void *pg_addr;
 931	u64 data0;
 932
 933	if (tlb_type == hypervisor)
 934		return;
 935
 936	preempt_disable();
 937
 938#ifdef CONFIG_DEBUG_DCFLUSH
 939	atomic_inc(&dcpage_flushes);
 940#endif
 941	data0 = 0;
 942	pg_addr = page_address(page);
 943	if (tlb_type == spitfire) {
 944		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
 945		if (page_mapping(page) != NULL)
 946			data0 |= ((u64)1 << 32);
 947	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 948#ifdef DCACHE_ALIASING_POSSIBLE
 949		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
 950#endif
 951	}
 952	if (data0) {
 953		xcall_deliver(data0, __pa(pg_addr),
 954			      (u64) pg_addr, cpu_online_mask);
 955#ifdef CONFIG_DEBUG_DCFLUSH
 956		atomic_inc(&dcpage_flushes_xcall);
 957#endif
 958	}
 959	__local_flush_dcache_page(page);
 960
 961	preempt_enable();
 962}
 963
 964void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 965{
 966	struct mm_struct *mm;
 967	unsigned long flags;
 968
 969	clear_softint(1 << irq);
 970
 971	/* See if we need to allocate a new TLB context because
 972	 * the version of the one we are using is now out of date.
 973	 */
 974	mm = current->active_mm;
 975	if (unlikely(!mm || (mm == &init_mm)))
 976		return;
 977
 978	spin_lock_irqsave(&mm->context.lock, flags);
 979
 980	if (unlikely(!CTX_VALID(mm->context)))
 981		get_new_mmu_context(mm);
 982
 983	spin_unlock_irqrestore(&mm->context.lock, flags);
 984
 985	load_secondary_context(mm);
 986	__flush_tlb_mm(CTX_HWBITS(mm->context),
 987		       SECONDARY_CONTEXT);
 988}
 989
 990void smp_new_mmu_context_version(void)
 991{
 992	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
 993}
 994
 995#ifdef CONFIG_KGDB
 996void kgdb_roundup_cpus(unsigned long flags)
 997{
 998	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
 999}
1000#endif
1001
1002void smp_fetch_global_regs(void)
1003{
1004	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
1005}
1006
1007void smp_fetch_global_pmu(void)
1008{
1009	if (tlb_type == hypervisor &&
1010	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
1011		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
1012	else
1013		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
1014}
1015
1016/* We know that the window frames of the user have been flushed
1017 * to the stack before we get here, because all of our callers
1018 * are flush_tlb_*() routines, and these run after flush_cache_*()
1019 * which performs the flushw.
1020 *
1021 * The SMP TLB coherency scheme we use works as follows:
1022 *
1023 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1024 *    space has (potentially) executed on, this is the heuristic
1025 *    we use to avoid doing cross calls.
1026 *
1027 *    Also, for flushing from kswapd and for clones, we use
1028 *    cpu_vm_mask as the list of cpus that must run the TLB flush.
1029 *
1030 * 2) TLB context numbers are shared globally across all processors
1031 *    in the system, this allows us to play several games to avoid
1032 *    cross calls.
1033 *
1034 *    One invariant is that when a cpu switches to a process whose
1035 *    tsk->active_mm->cpu_vm_mask does not have the current cpu's
1036 *    bit set, that tlb context is flushed locally.
1037 *
1038 *    If the address space is non-shared (i.e. mm->mm_users == 1) we avoid
1039 *    cross calls when we want to flush the currently running process's
1040 *    tlb state.  This is done by clearing all cpu bits except the current
1041 *    processor's in current->mm->cpu_vm_mask and performing the
1042 *    flush locally only.  This will force any subsequent cpus which run
1043 *    this task to flush the context from the local tlb if the process
1044 *    migrates to another cpu (again).
1045 *
1046 * 3) For shared address spaces (threads) and swapping we bite the
1047 *    bullet for most cases and perform the cross call (but only to
1048 *    the cpus listed in cpu_vm_mask).
1049 *
1050 *    The performance gain from "optimizing" away the cross call for threads is
1051 *    questionable (in theory the big win for threads is the massive sharing of
1052 *    address space state across processors).
1053 */
1054
1055/* This currently is only used by the hugetlb arch pre-fault
1056 * hook on UltraSPARC-III+ and later when changing the pagesize
1057 * bits of the context register for an address space.
1058 */
1059void smp_flush_tlb_mm(struct mm_struct *mm)
1060{
1061	u32 ctx = CTX_HWBITS(mm->context);
1062	int cpu = get_cpu();
1063
1064	if (atomic_read(&mm->mm_users) == 1) {
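    		/* We are the sole user of this address space, so per
    		 * point 2) in the comment above we can shrink the mask
    		 * to just this cpu and flush locally.
    		 */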
1065		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1066		goto local_flush_and_out;
1067	}
1068
1069	smp_cross_call_masked(&xcall_flush_tlb_mm,
1070			      ctx, 0, 0,
1071			      mm_cpumask(mm));
1072
1073local_flush_and_out:
1074	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1075
1076	put_cpu();
1077}
1078
1079struct tlb_pending_info {
1080	unsigned long ctx;
1081	unsigned long nr;
1082	unsigned long *vaddrs;
1083};
1084
1085static void tlb_pending_func(void *info)
1086{
1087	struct tlb_pending_info *t = info;
1088
1089	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
1090}
1091
1092void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1093{
1094	u32 ctx = CTX_HWBITS(mm->context);
1095	struct tlb_pending_info info;
1096	int cpu = get_cpu();
1097
1098	info.ctx = ctx;
1099	info.nr = nr;
1100	info.vaddrs = vaddrs;
1101
1102	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1103		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1104	else
1105		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
1106				       &info, 1);
1107
1108	__flush_tlb_pending(ctx, nr, vaddrs);
1109
1110	put_cpu();
1111}
1112
1113void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
1114{
1115	unsigned long context = CTX_HWBITS(mm->context);
1116	int cpu = get_cpu();
1117
1118	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1119		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1120	else
1121		smp_cross_call_masked(&xcall_flush_tlb_page,
1122				      context, vaddr, 0,
1123				      mm_cpumask(mm));
1124	__flush_tlb_page(context, vaddr);
1125
1126	put_cpu();
1127}
1128
1129void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1130{
1131	start &= PAGE_MASK;
1132	end    = PAGE_ALIGN(end);
1133	if (start != end) {
1134		smp_cross_call(&xcall_flush_tlb_kernel_range,
1135			       0, start, end);
1136
1137		__flush_tlb_kernel_range(start, end);
1138	}
1139}
1140
1141/* CPU capture. */
1142/* #define CAPTURE_DEBUG */
1143extern unsigned long xcall_capture;
1144
1145static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1146static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1147static unsigned long penguins_are_doing_time;
1148
1149void smp_capture(void)
1150{
1151	int result = atomic_add_ret(1, &smp_capture_depth);
1152
1153	if (result == 1) {
1154		int ncpus = num_online_cpus();
1155
1156#ifdef CAPTURE_DEBUG
1157		printk("CPU[%d]: Sending penguins to jail...",
1158		       smp_processor_id());
1159#endif
1160		penguins_are_doing_time = 1;
1161		atomic_inc(&smp_capture_registry);
1162		smp_cross_call(&xcall_capture, 0, 0, 0);
1163		while (atomic_read(&smp_capture_registry) != ncpus)
1164			rmb();
1165#ifdef CAPTURE_DEBUG
1166		printk("done\n");
1167#endif
1168	}
1169}
1170
1171void smp_release(void)
1172{
1173	if (atomic_dec_and_test(&smp_capture_depth)) {
1174#ifdef CAPTURE_DEBUG
1175		printk("CPU[%d]: Giving pardon to "
1176		       "imprisoned penguins\n",
1177		       smp_processor_id());
1178#endif
1179		penguins_are_doing_time = 0;
1180		membar_safe("#StoreLoad");
1181		atomic_dec(&smp_capture_registry);
1182	}
1183}
1184
1185/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
1186 * set, so they can service tlb flush xcalls...
1187 */
1188extern void prom_world(int);
1189
1190void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
1191{
1192	clear_softint(1 << irq);
1193
1194	preempt_disable();
1195
1196	__asm__ __volatile__("flushw");
1197	prom_world(1);
1198	atomic_inc(&smp_capture_registry);
1199	membar_safe("#StoreLoad");
1200	while (penguins_are_doing_time)
1201		rmb();
1202	atomic_dec(&smp_capture_registry);
1203	prom_world(0);
1204
1205	preempt_enable();
1206}
1207
1208/* /proc/profile writes can call this, don't __init it please. */
1209int setup_profiling_timer(unsigned int multiplier)
1210{
1211	return -EINVAL;
1212}
1213
1214void __init smp_prepare_cpus(unsigned int max_cpus)
1215{
1216}
1217
1218void smp_prepare_boot_cpu(void)
1219{
1220}
1221
1222void __init smp_setup_processor_id(void)
1223{
1224	if (tlb_type == spitfire)
1225		xcall_deliver_impl = spitfire_xcall_deliver;
1226	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
1227		xcall_deliver_impl = cheetah_xcall_deliver;
1228	else
1229		xcall_deliver_impl = hypervisor_xcall_deliver;
1230}
1231
1232void smp_fill_in_sib_core_maps(void)
1233{
1234	unsigned int i;
1235
1236	for_each_present_cpu(i) {
1237		unsigned int j;
1238
1239		cpumask_clear(&cpu_core_map[i]);
1240		if (cpu_data(i).core_id == 0) {
1241			cpumask_set_cpu(i, &cpu_core_map[i]);
1242			continue;
1243		}
1244
1245		for_each_present_cpu(j) {
1246			if (cpu_data(i).core_id ==
1247			    cpu_data(j).core_id)
1248				cpumask_set_cpu(j, &cpu_core_map[i]);
1249		}
1250	}
1251
1252	for_each_present_cpu(i) {
1253		unsigned int j;
1254
1255		cpumask_clear(&per_cpu(cpu_sibling_map, i));
1256		if (cpu_data(i).proc_id == -1) {
1257			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
1258			continue;
1259		}
1260
1261		for_each_present_cpu(j) {
1262			if (cpu_data(i).proc_id ==
1263			    cpu_data(j).proc_id)
1264				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
1265		}
1266	}
1267}
1268
1269int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1270{
1271	int ret = smp_boot_one_cpu(cpu, tidle);
1272
1273	if (!ret) {
1274		cpumask_set_cpu(cpu, &smp_commenced_mask);
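    		/* smp_callin() on the new cpu spins on this mask bit
    		 * before marking itself online, so wait for that here.
    		 */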
1275		while (!cpu_online(cpu))
1276			mb();
1277		if (!cpu_online(cpu)) {
1278			ret = -ENODEV;
1279		} else {
1280			/* On SUN4V, writes to %tick and %stick are
1281			 * not allowed.
1282			 */
1283			if (tlb_type != hypervisor)
1284				smp_synchronize_one_tick(cpu);
1285		}
1286	}
1287	return ret;
1288}
1289
1290#ifdef CONFIG_HOTPLUG_CPU
1291void cpu_play_dead(void)
1292{
1293	int cpu = smp_processor_id();
1294	unsigned long pstate;
1295
1296	idle_task_exit();
1297
1298	if (tlb_type == hypervisor) {
1299		struct trap_per_cpu *tb = &trap_block[cpu];
1300
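    		/* A queue size of zero unconfigures each of this cpu's
    		 * mondo and error queues (per the sun4v API) before the
    		 * cpu is stopped.
    		 */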
1301		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1302				tb->cpu_mondo_pa, 0);
1303		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1304				tb->dev_mondo_pa, 0);
1305		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1306				tb->resum_mondo_pa, 0);
1307		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1308				tb->nonresum_mondo_pa, 0);
1309	}
1310
1311	cpumask_clear_cpu(cpu, &smp_commenced_mask);
1312	membar_safe("#Sync");
1313
1314	local_irq_disable();
1315
1316	__asm__ __volatile__(
1317		"rdpr	%%pstate, %0\n\t"
1318		"wrpr	%0, %1, %%pstate"
1319		: "=r" (pstate)
1320		: "i" (PSTATE_IE));
1321
1322	while (1)
1323		barrier();
1324}
1325
1326int __cpu_disable(void)
1327{
1328	int cpu = smp_processor_id();
1329	cpuinfo_sparc *c;
1330	int i;
1331
1332	for_each_cpu(i, &cpu_core_map[cpu])
1333		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
1334	cpumask_clear(&cpu_core_map[cpu]);
1335
1336	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
1337		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
1338	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
1339
1340	c = &cpu_data(cpu);
1341
1342	c->core_id = 0;
1343	c->proc_id = -1;
1344
1345	smp_wmb();
1346
1347	/* Make sure no interrupts point to this cpu.  */
1348	fixup_irqs();
1349
1350	local_irq_enable();
1351	mdelay(1);
1352	local_irq_disable();
1353
1354	set_cpu_online(cpu, false);
1355
1356	cpu_map_rebuild();
1357
1358	return 0;
1359}
1360
1361void __cpu_die(unsigned int cpu)
1362{
1363	int i;
1364
1365	for (i = 0; i < 100; i++) {
1366		smp_rmb();
1367		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
1368			break;
1369		msleep(100);
1370	}
1371	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
1372		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1373	} else {
1374#if defined(CONFIG_SUN_LDOMS)
1375		unsigned long hv_err;
1376		int limit = 100;
1377
1378		do {
1379			hv_err = sun4v_cpu_stop(cpu);
1380			if (hv_err == HV_EOK) {
1381				set_cpu_present(cpu, false);
1382				break;
1383			}
1384		} while (--limit > 0);
1385		if (limit <= 0) {
1386			printk(KERN_ERR "sun4v_cpu_stop() failed, err=%lu\n",
1387			       hv_err);
1388		}
1389#endif
1390	}
1391}
1392#endif
1393
1394void __init smp_cpus_done(unsigned int max_cpus)
1395{
1396	pcr_arch_init();
1397}
1398
1399void smp_send_reschedule(int cpu)
1400{
1401	if (cpu == smp_processor_id()) {
1402		WARN_ON_ONCE(preemptible());
1403		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
1404	} else {
1405		xcall_deliver((u64) &xcall_receive_signal,
1406			      0, 0, cpumask_of(cpu));
1407	}
1408}
1409
1410void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1411{
1412	clear_softint(1 << irq);
1413	scheduler_ipi();
1414}
1415
1416/* This is a nop because we capture all other cpus
1417 * anyway when making the PROM active.
1418 */
1419void smp_send_stop(void)
1420{
1421}
1422
1423/**
1424 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
1425 * @cpu: cpu to allocate for
1426 * @size: size allocation in bytes
1427 * @align: alignment
1428 *
1429 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
1430 * does the right thing for NUMA regardless of the current
1431 * configuration.
1432 *
1433 * RETURNS:
1434 * Pointer to the allocated area on success, NULL on failure.
1435 */
1436static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
1437					size_t align)
1438{
1439	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
1440#ifdef CONFIG_NEED_MULTIPLE_NODES
1441	int node = cpu_to_node(cpu);
1442	void *ptr;
1443
1444	if (!node_online(node) || !NODE_DATA(node)) {
1445		ptr = __alloc_bootmem(size, align, goal);
1446		pr_info("cpu %d has no node %d or node-local memory\n",
1447			cpu, node);
1448		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
1449			 cpu, size, __pa(ptr));
1450	} else {
1451		ptr = __alloc_bootmem_node(NODE_DATA(node),
1452					   size, align, goal);
1453		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
1454			 "%016lx\n", cpu, size, node, __pa(ptr));
1455	}
1456	return ptr;
1457#else
1458	return __alloc_bootmem(size, align, goal);
1459#endif
1460}
1461
1462static void __init pcpu_free_bootmem(void *ptr, size_t size)
1463{
1464	free_bootmem(__pa(ptr), size);
1465}
1466
1467static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
1468{
1469	if (cpu_to_node(from) == cpu_to_node(to))
1470		return LOCAL_DISTANCE;
1471	else
1472		return REMOTE_DISTANCE;
1473}
1474
1475static void __init pcpu_populate_pte(unsigned long addr)
1476{
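    	/* Pre-build the intermediate page table levels covering 'addr'
    	 * so that the page-based percpu first chunk has ptes to
    	 * populate.
    	 */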
1477	pgd_t *pgd = pgd_offset_k(addr);
1478	pud_t *pud;
1479	pmd_t *pmd;
1480
1481	pud = pud_offset(pgd, addr);
1482	if (pud_none(*pud)) {
1483		pmd_t *new;
1484
1485		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1486		pud_populate(&init_mm, pud, new);
1487	}
1488
1489	pmd = pmd_offset(pud, addr);
1490	if (!pmd_present(*pmd)) {
1491		pte_t *new;
1492
1493		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1494		pmd_populate_kernel(&init_mm, pmd, new);
1495	}
1496}
1497
1498void __init setup_per_cpu_areas(void)
1499{
1500	unsigned long delta;
1501	unsigned int cpu;
1502	int rc = -EINVAL;
1503
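    	/* Try the embed first-chunk allocator first; fall back to the
    	 * page-at-a-time allocator if embedding fails or was disabled
    	 * via the percpu_alloc= boot option.
    	 */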
1504	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
1505		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1506					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
1507					    pcpu_cpu_distance,
1508					    pcpu_alloc_bootmem,
1509					    pcpu_free_bootmem);
1510		if (rc)
1511			pr_warning("PERCPU: %s allocator failed (%d), "
1512				   "falling back to page size\n",
1513				   pcpu_fc_names[pcpu_chosen_fc], rc);
1514	}
1515	if (rc < 0)
1516		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
1517					   pcpu_alloc_bootmem,
1518					   pcpu_free_bootmem,
1519					   pcpu_populate_pte);
1520	if (rc < 0)
1521		panic("cannot initialize percpu area (err=%d)", rc);
1522
1523	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1524	for_each_possible_cpu(cpu)
1525		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
1526
1527	/* Setup %g5 for the boot cpu.  */
1528	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1529
1530	of_fill_in_cpu_data();
1531	if (tlb_type == hypervisor)
1532		mdesc_fill_in_cpu_data(cpu_all_mask);
1533}