/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"

int sparc64_multi_core __read_mostly;

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void __cpuinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	ipi_call_lock_irq();
	set_cpu_online(cpuid, true);
	ipi_call_unlock_irq();

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
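
/* Illustrative worked example (numbers hypothetical, not from the
 * original source): suppose the slave samples t0 = 100, the master's
 * reply carries tm = 160, and the slave samples t1 = 120.  Then
 * rt = t1 - t0 = 20, the midpoint is tcenter = 110, and get_delta()
 * returns 110 - 160 = -50, i.e. this cpu's tick looks 50 cycles behind
 * the master's, so the caller below applies adj = +50 via
 * tick_ops->add_tick().
 */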

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}
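
/* Illustrative example (values hypothetical): with kern_base =
 * 0x40400000 and KERNBASE = 0x400000, a kernel-image address of
 * 0x404000 translates to the real address
 * 0x40400000 + (0x404000 - 0x400000) = 0x40404000.
 */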

static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __cpuinit smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}

static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}
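
	/* Illustrative example of the remap above (cpu number
	 * hypothetical): logical cpu 0x41 yields upaid
	 * ((0x41 & 0x3c) << 1) | ((0x41 & 0x40) >> 4) | (0x41 & 0x3) = 0x5.
	 */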

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

/* Cheetah now allows us to send the whole 64 bytes of data in the
 * interrupt packet, but we have no use for that.  However we do take
 * advantage of the new pipelining feature (ie. dispatch to multiple
 * cpus simultaneously).
 */
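/* Summary of the status-register layout, derived from the code below:
 * each dispatched target consumes one (busy, nack) bit pair in the
 * interrupt dispatch status register, with bit 2n meaning "busy" and
 * bit 2n+1 meaning "nack" (hence nack_mask = busy_mask << 1).  On JBUS
 * parts the pair index n is the target's ITID itself; otherwise it is
 * the sequential nack_busy_id assigned at dispatch time, which caps a
 * single dispatch burst at 32 targets.
 */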
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

/* Multi-cpu list version. */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int retries, this_cpu, prev_sent, i, saw_cpu_error;
	unsigned long status;
	u16 *cpu_list;

	this_cpu = smp_processor_id();

	cpu_list = __va(tb->cpu_list_pa);

	saw_cpu_error = 0;
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err == HV_CPU_STATE_ERROR) {
					saw_cpu_error = (cpu + 1);
					cpu_list[i] = 0xffff;
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	if (unlikely(saw_cpu_error))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "(including %d) were in error state\n",
	       this_cpu, saw_cpu_error - 1);
	return;

fatal_mondo_timeout:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}
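
/* Illustrative encoding example (values hypothetical): with ctx = 0x47
 * and a handler whose low 32 address bits are 0x00404abc, the
 * expression above packs data0 = 0x0000004700404abc, i.e. the MMU
 * context in the high half and the xcall entry point in the low half.
 */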

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_interrupt();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_single_interrupt();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}

void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus on which to run the TLB flush.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
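
/* Illustrative walk-through of invariant 2's optimization (cpu numbers
 * hypothetical): a single-user mm running on cpu 2 with cpu_vm_mask =
 * { 0, 2 } has its mask collapsed to { 2 } and is flushed locally
 * only.  Should the task later migrate back to cpu 0, the now-missing
 * bit forces cpu 0 to flush the stale context from its own TLB before
 * reusing it, so no cross call was ever needed.
 */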

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm_cpumask(mm));

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}
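
/* Typical pairing (illustrative; prom_do_something() is hypothetical):
 *
 *	smp_capture();
 *	prom_do_something();
 *	smp_release();
 *
 * Nesting is safe because only the 0 -> 1 transition of
 * smp_capture_depth actually jails the other cpus, and only the
 * matching 1 -> 0 transition pardons them.
 */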

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	ipi_call_lock();
	set_cpu_online(cpu, false);
	ipi_call_unlock();

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
	pcr_arch_init();
}

void smp_send_reschedule(int cpu)
{
	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}

static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}
1/* smp.c: Sparc64 SMP support.
2 *
3 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/export.h>
7#include <linux/kernel.h>
8#include <linux/sched.h>
9#include <linux/mm.h>
10#include <linux/pagemap.h>
11#include <linux/threads.h>
12#include <linux/smp.h>
13#include <linux/interrupt.h>
14#include <linux/kernel_stat.h>
15#include <linux/delay.h>
16#include <linux/init.h>
17#include <linux/spinlock.h>
18#include <linux/fs.h>
19#include <linux/seq_file.h>
20#include <linux/cache.h>
21#include <linux/jiffies.h>
22#include <linux/profile.h>
23#include <linux/bootmem.h>
24#include <linux/vmalloc.h>
25#include <linux/ftrace.h>
26#include <linux/cpu.h>
27#include <linux/slab.h>
28#include <linux/kgdb.h>
29
30#include <asm/head.h>
31#include <asm/ptrace.h>
32#include <linux/atomic.h>
33#include <asm/tlbflush.h>
34#include <asm/mmu_context.h>
35#include <asm/cpudata.h>
36#include <asm/hvtramp.h>
37#include <asm/io.h>
38#include <asm/timer.h>
39#include <asm/setup.h>
40
41#include <asm/irq.h>
42#include <asm/irq_regs.h>
43#include <asm/page.h>
44#include <asm/pgtable.h>
45#include <asm/oplib.h>
46#include <linux/uaccess.h>
47#include <asm/starfire.h>
48#include <asm/tlb.h>
49#include <asm/sections.h>
50#include <asm/prom.h>
51#include <asm/mdesc.h>
52#include <asm/ldc.h>
53#include <asm/hypervisor.h>
54#include <asm/pcr.h>
55
56#include "cpumap.h"
57#include "kernel.h"
58
59DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
60cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
61 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
62
63cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
64 [0 ... NR_CPUS-1] = CPU_MASK_NONE };
65
66cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
67 [0 ... NR_CPUS - 1] = CPU_MASK_NONE };
68
69EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
70EXPORT_SYMBOL(cpu_core_map);
71EXPORT_SYMBOL(cpu_core_sib_map);
72EXPORT_SYMBOL(cpu_core_sib_cache_map);
73
74static cpumask_t smp_commenced_mask;
75
76void smp_info(struct seq_file *m)
77{
78 int i;
79
80 seq_printf(m, "State:\n");
81 for_each_online_cpu(i)
82 seq_printf(m, "CPU%d:\t\tonline\n", i);
83}
84
85void smp_bogo(struct seq_file *m)
86{
87 int i;
88
89 for_each_online_cpu(i)
90 seq_printf(m,
91 "Cpu%dClkTck\t: %016lx\n",
92 i, cpu_data(i).clock_tick);
93}
94
95extern void setup_sparc64_timer(void);
96
97static volatile unsigned long callin_flag = 0;
98
99void smp_callin(void)
100{
101 int cpuid = hard_smp_processor_id();
102
103 __local_per_cpu_offset = __per_cpu_offset(cpuid);
104
105 if (tlb_type == hypervisor)
106 sun4v_ktsb_register();
107
108 __flush_tlb_all();
109
110 setup_sparc64_timer();
111
112 if (cheetah_pcache_forced_on)
113 cheetah_enable_pcache();
114
115 callin_flag = 1;
116 __asm__ __volatile__("membar #Sync\n\t"
117 "flush %%g6" : : : "memory");
118
119 /* Clear this or we will die instantly when we
120 * schedule back to this idler...
121 */
122 current_thread_info()->new_child = 0;
123
124 /* Attach to the address space of init_task. */
125 atomic_inc(&init_mm.mm_count);
126 current->active_mm = &init_mm;
127
128 /* inform the notifiers about the new cpu */
129 notify_cpu_starting(cpuid);
130
131 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
132 rmb();
133
134 set_cpu_online(cpuid, true);
135
136 /* idle thread is expected to have preempt disabled */
137 preempt_disable();
138
139 local_irq_enable();
140
141 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
142}
143
144void cpu_panic(void)
145{
146 printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
147 panic("SMP bolixed\n");
148}
149
150/* This tick register synchronization scheme is taken entirely from
151 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
152 *
153 * The only change I've made is to rework it so that the master
154 * initiates the synchonization instead of the slave. -DaveM
155 */
156
157#define MASTER 0
158#define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long))
159
160#define NUM_ROUNDS 64 /* magic value */
161#define NUM_ITERS 5 /* likewise */
162
163static DEFINE_RAW_SPINLOCK(itc_sync_lock);
164static unsigned long go[SLAVE + 1];
165
166#define DEBUG_TICK_SYNC 0
167
168static inline long get_delta (long *rt, long *master)
169{
170 unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
171 unsigned long tcenter, t0, t1, tm;
172 unsigned long i;
173
174 for (i = 0; i < NUM_ITERS; i++) {
175 t0 = tick_ops->get_tick();
176 go[MASTER] = 1;
177 membar_safe("#StoreLoad");
178 while (!(tm = go[SLAVE]))
179 rmb();
180 go[SLAVE] = 0;
181 wmb();
182 t1 = tick_ops->get_tick();
183
184 if (t1 - t0 < best_t1 - best_t0)
185 best_t0 = t0, best_t1 = t1, best_tm = tm;
186 }
187
188 *rt = best_t1 - best_t0;
189 *master = best_tm - best_t0;
190
191 /* average best_t0 and best_t1 without overflow: */
192 tcenter = (best_t0/2 + best_t1/2);
193 if (best_t0 % 2 + best_t1 % 2 == 2)
194 tcenter++;
195 return tcenter - best_tm;
196}
197
198void smp_synchronize_tick_client(void)
199{
200 long i, delta, adj, adjust_latency = 0, done = 0;
201 unsigned long flags, rt, master_time_stamp;
202#if DEBUG_TICK_SYNC
203 struct {
204 long rt; /* roundtrip time */
205 long master; /* master's timestamp */
206 long diff; /* difference between midpoint and master's timestamp */
207 long lat; /* estimate of itc adjustment latency */
208 } t[NUM_ROUNDS];
209#endif
210
211 go[MASTER] = 1;
212
213 while (go[MASTER])
214 rmb();
215
216 local_irq_save(flags);
217 {
218 for (i = 0; i < NUM_ROUNDS; i++) {
219 delta = get_delta(&rt, &master_time_stamp);
220 if (delta == 0)
221 done = 1; /* let's lock on to this... */
222
223 if (!done) {
224 if (i > 0) {
225 adjust_latency += -delta;
226 adj = -delta + adjust_latency/4;
227 } else
228 adj = -delta;
229
230 tick_ops->add_tick(adj);
231 }
232#if DEBUG_TICK_SYNC
233 t[i].rt = rt;
234 t[i].master = master_time_stamp;
235 t[i].diff = delta;
236 t[i].lat = adjust_latency/4;
237#endif
238 }
239 }
240 local_irq_restore(flags);
241
242#if DEBUG_TICK_SYNC
243 for (i = 0; i < NUM_ROUNDS; i++)
244 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
245 t[i].rt, t[i].master, t[i].diff, t[i].lat);
246#endif
247
248 printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
249 "(last diff %ld cycles, maxerr %lu cycles)\n",
250 smp_processor_id(), delta, rt);
251}
252
253static void smp_start_sync_tick_client(int cpu);
254
255static void smp_synchronize_one_tick(int cpu)
256{
257 unsigned long flags, i;
258
259 go[MASTER] = 0;
260
261 smp_start_sync_tick_client(cpu);
262
263 /* wait for client to be ready */
264 while (!go[MASTER])
265 rmb();
266
267 /* now let the client proceed into his loop */
268 go[MASTER] = 0;
269 membar_safe("#StoreLoad");
270
271 raw_spin_lock_irqsave(&itc_sync_lock, flags);
272 {
273 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
274 while (!go[MASTER])
275 rmb();
276 go[MASTER] = 0;
277 wmb();
278 go[SLAVE] = tick_ops->get_tick();
279 membar_safe("#StoreLoad");
280 }
281 }
282 raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
283}
284
285#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
286static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
287 void **descrp)
288{
289 extern unsigned long sparc64_ttable_tl0;
290 extern unsigned long kern_locked_tte_data;
291 struct hvtramp_descr *hdesc;
292 unsigned long trampoline_ra;
293 struct trap_per_cpu *tb;
294 u64 tte_vaddr, tte_data;
295 unsigned long hv_err;
296 int i;
297
298 hdesc = kzalloc(sizeof(*hdesc) +
299 (sizeof(struct hvtramp_mapping) *
300 num_kernel_image_mappings - 1),
301 GFP_KERNEL);
302 if (!hdesc) {
303 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
304 "hvtramp_descr.\n");
305 return;
306 }
307 *descrp = hdesc;
308
309 hdesc->cpu = cpu;
310 hdesc->num_mappings = num_kernel_image_mappings;
311
312 tb = &trap_block[cpu];
313
314 hdesc->fault_info_va = (unsigned long) &tb->fault_info;
315 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
316
317 hdesc->thread_reg = thread_reg;
318
319 tte_vaddr = (unsigned long) KERNBASE;
320 tte_data = kern_locked_tte_data;
321
322 for (i = 0; i < hdesc->num_mappings; i++) {
323 hdesc->maps[i].vaddr = tte_vaddr;
324 hdesc->maps[i].tte = tte_data;
325 tte_vaddr += 0x400000;
326 tte_data += 0x400000;
327 }
328
329 trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
330
331 hv_err = sun4v_cpu_start(cpu, trampoline_ra,
332 kimage_addr_to_ra(&sparc64_ttable_tl0),
333 __pa(hdesc));
334 if (hv_err)
335 printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
336 "gives error %lu\n", hv_err);
337}
338#endif
339
340extern unsigned long sparc64_cpu_startup;
341
342/* The OBP cpu startup callback truncates the 3rd arg cookie to
343 * 32-bits (I think) so to be safe we have it read the pointer
344 * contained here so we work on >4GB machines. -DaveM
345 */
346static struct thread_info *cpu_new_thread = NULL;
347
348static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
349{
350 unsigned long entry =
351 (unsigned long)(&sparc64_cpu_startup);
352 unsigned long cookie =
353 (unsigned long)(&cpu_new_thread);
354 void *descr = NULL;
355 int timeout, ret;
356
357 callin_flag = 0;
358 cpu_new_thread = task_thread_info(idle);
359
360 if (tlb_type == hypervisor) {
361#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
362 if (ldom_domaining_enabled)
363 ldom_startcpu_cpuid(cpu,
364 (unsigned long) cpu_new_thread,
365 &descr);
366 else
367#endif
368 prom_startcpu_cpuid(cpu, entry, cookie);
369 } else {
370 struct device_node *dp = of_find_node_by_cpuid(cpu);
371
372 prom_startcpu(dp->phandle, entry, cookie);
373 }
374
375 for (timeout = 0; timeout < 50000; timeout++) {
376 if (callin_flag)
377 break;
378 udelay(100);
379 }
380
381 if (callin_flag) {
382 ret = 0;
383 } else {
384 printk("Processor %d is stuck.\n", cpu);
385 ret = -ENODEV;
386 }
387 cpu_new_thread = NULL;
388
389 kfree(descr);
390
391 return ret;
392}
393
394static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
395{
396 u64 result, target;
397 int stuck, tmp;
398
399 if (this_is_starfire) {
400 /* map to real upaid */
401 cpu = (((cpu & 0x3c) << 1) |
402 ((cpu & 0x40) >> 4) |
403 (cpu & 0x3));
404 }
405
406 target = (cpu << 14) | 0x70;
407again:
408 /* Ok, this is the real Spitfire Errata #54.
409 * One must read back from a UDB internal register
410 * after writes to the UDB interrupt dispatch, but
411 * before the membar Sync for that write.
412 * So we use the high UDB control register (ASI 0x7f,
413 * ADDR 0x20) for the dummy read. -DaveM
414 */
415 tmp = 0x40;
416 __asm__ __volatile__(
417 "wrpr %1, %2, %%pstate\n\t"
418 "stxa %4, [%0] %3\n\t"
419 "stxa %5, [%0+%8] %3\n\t"
420 "add %0, %8, %0\n\t"
421 "stxa %6, [%0+%8] %3\n\t"
422 "membar #Sync\n\t"
423 "stxa %%g0, [%7] %3\n\t"
424 "membar #Sync\n\t"
425 "mov 0x20, %%g1\n\t"
426 "ldxa [%%g1] 0x7f, %%g0\n\t"
427 "membar #Sync"
428 : "=r" (tmp)
429 : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
430 "r" (data0), "r" (data1), "r" (data2), "r" (target),
431 "r" (0x10), "0" (tmp)
432 : "g1");
433
434 /* NOTE: PSTATE_IE is still clear. */
435 stuck = 100000;
436 do {
437 __asm__ __volatile__("ldxa [%%g0] %1, %0"
438 : "=r" (result)
439 : "i" (ASI_INTR_DISPATCH_STAT));
440 if (result == 0) {
441 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
442 : : "r" (pstate));
443 return;
444 }
445 stuck -= 1;
446 if (stuck == 0)
447 break;
448 } while (result & 0x1);
449 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
450 : : "r" (pstate));
451 if (stuck == 0) {
452 printk("CPU[%d]: mondo stuckage result[%016llx]\n",
453 smp_processor_id(), result);
454 } else {
455 udelay(2);
456 goto again;
457 }
458}
459
460static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
461{
462 u64 *mondo, data0, data1, data2;
463 u16 *cpu_list;
464 u64 pstate;
465 int i;
466
467 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
468 cpu_list = __va(tb->cpu_list_pa);
469 mondo = __va(tb->cpu_mondo_block_pa);
470 data0 = mondo[0];
471 data1 = mondo[1];
472 data2 = mondo[2];
473 for (i = 0; i < cnt; i++)
474 spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
475}
476
477/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
478 * packet, but we have no use for that. However we do take advantage of
479 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
480 */
481static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
482{
483 int nack_busy_id, is_jbus, need_more;
484 u64 *mondo, pstate, ver, busy_mask;
485 u16 *cpu_list;
486
487 cpu_list = __va(tb->cpu_list_pa);
488 mondo = __va(tb->cpu_mondo_block_pa);
489
490 /* Unfortunately, someone at Sun had the brilliant idea to make the
491 * busy/nack fields hard-coded by ITID number for this Ultra-III
492 * derivative processor.
493 */
494 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
495 is_jbus = ((ver >> 32) == __JALAPENO_ID ||
496 (ver >> 32) == __SERRANO_ID);
497
498 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
499
500retry:
501 need_more = 0;
502 __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
503 : : "r" (pstate), "i" (PSTATE_IE));
504
505 /* Setup the dispatch data registers. */
506 __asm__ __volatile__("stxa %0, [%3] %6\n\t"
507 "stxa %1, [%4] %6\n\t"
508 "stxa %2, [%5] %6\n\t"
509 "membar #Sync\n\t"
510 : /* no outputs */
511 : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
512 "r" (0x40), "r" (0x50), "r" (0x60),
513 "i" (ASI_INTR_W));
514
515 nack_busy_id = 0;
516 busy_mask = 0;
517 {
518 int i;
519
520 for (i = 0; i < cnt; i++) {
521 u64 target, nr;
522
523 nr = cpu_list[i];
524 if (nr == 0xffff)
525 continue;
526
527 target = (nr << 14) | 0x70;
528 if (is_jbus) {
529 busy_mask |= (0x1UL << (nr * 2));
530 } else {
531 target |= (nack_busy_id << 24);
532 busy_mask |= (0x1UL <<
533 (nack_busy_id * 2));
534 }
535 __asm__ __volatile__(
536 "stxa %%g0, [%0] %1\n\t"
537 "membar #Sync\n\t"
538 : /* no outputs */
539 : "r" (target), "i" (ASI_INTR_W));
540 nack_busy_id++;
541 if (nack_busy_id == 32) {
542 need_more = 1;
543 break;
544 }
545 }
546 }
547
548 /* Now, poll for completion. */
549 {
550 u64 dispatch_stat, nack_mask;
551 long stuck;
552
553 stuck = 100000 * nack_busy_id;
554 nack_mask = busy_mask << 1;
555 do {
556 __asm__ __volatile__("ldxa [%%g0] %1, %0"
557 : "=r" (dispatch_stat)
558 : "i" (ASI_INTR_DISPATCH_STAT));
559 if (!(dispatch_stat & (busy_mask | nack_mask))) {
560 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
561 : : "r" (pstate));
562 if (unlikely(need_more)) {
563 int i, this_cnt = 0;
564 for (i = 0; i < cnt; i++) {
565 if (cpu_list[i] == 0xffff)
566 continue;
567 cpu_list[i] = 0xffff;
568 this_cnt++;
569 if (this_cnt == 32)
570 break;
571 }
572 goto retry;
573 }
574 return;
575 }
576 if (!--stuck)
577 break;
578 } while (dispatch_stat & busy_mask);
579
580 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
581 : : "r" (pstate));
582
583 if (dispatch_stat & busy_mask) {
584 /* Busy bits will not clear, continue instead
585 * of freezing up on this cpu.
586 */
587 printk("CPU[%d]: mondo stuckage result[%016llx]\n",
588 smp_processor_id(), dispatch_stat);
589 } else {
590 int i, this_busy_nack = 0;
591
592 /* Delay some random time with interrupts enabled
593 * to prevent deadlock.
594 */
595 udelay(2 * nack_busy_id);
596
597 /* Clear out the mask bits for cpus which did not
598 * NACK us.
599 */
600 for (i = 0; i < cnt; i++) {
601 u64 check_mask, nr;
602
603 nr = cpu_list[i];
604 if (nr == 0xffff)
605 continue;
606
607 if (is_jbus)
608 check_mask = (0x2UL << (2*nr));
609 else
610 check_mask = (0x2UL <<
611 this_busy_nack);
612 if ((dispatch_stat & check_mask) == 0)
613 cpu_list[i] = 0xffff;
614 this_busy_nack += 2;
615 if (this_busy_nack == 64)
616 break;
617 }
618
619 goto retry;
620 }
621 }
622}
623
624/* Multi-cpu list version. */
625static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
626{
627 int retries, this_cpu, prev_sent, i, saw_cpu_error;
628 unsigned long status;
629 u16 *cpu_list;
630
631 this_cpu = smp_processor_id();
632
633 cpu_list = __va(tb->cpu_list_pa);
634
635 saw_cpu_error = 0;
636 retries = 0;
637 prev_sent = 0;
638 do {
639 int forward_progress, n_sent;
640
641 status = sun4v_cpu_mondo_send(cnt,
642 tb->cpu_list_pa,
643 tb->cpu_mondo_block_pa);
644
645 /* HV_EOK means all cpus received the xcall, we're done. */
646 if (likely(status == HV_EOK))
647 break;
648
649 /* First, see if we made any forward progress.
650 *
651 * The hypervisor indicates successful sends by setting
652 * cpu list entries to the value 0xffff.
653 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err == HV_CPU_STATE_ERROR) {
					saw_cpu_error = (cpu + 1);
					cpu_list[i] = 0xffff;
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	if (unlikely(saw_cpu_error))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "(including %d) were in error state\n",
	       this_cpu, saw_cpu_error - 1);
	return;

fatal_mondo_timeout:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       "progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in "mask"
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
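	/* Pack the dispatch data the way the xcall trap handlers expect
	 * it: the MMU context in the upper 32 bits of data0 and the
	 * handler's text address in the lower 32 (kernel text lives in
	 * the low 4GB on sparc64, so the truncation is safe).
	 */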
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}
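
/* A (hypothetical) caller's view: after resizing an mm's TSB one
 * would do something like
 *
 *	grow_tsb(mm);
 *	smp_tsb_sync(mm);
 *
 * so that every cpu currently running this mm reloads its TSB
 * registers via tsb_context_switch().
 */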

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

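/* Flush the local cpu's D-cache copy of PAGE.  With D-cache aliasing
 * possible we flush the D-cache proper (and the I-cache too on
 * spitfire when the page is mapped to userspace); without aliasing
 * only a stale I-cache image is a concern.
 */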
static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}

void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on; this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus on which to run the
 *    TLB flush.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system; this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (i.e. mm_users == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */
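
/* Example of invariant 2: a single-threaded task flushes on cpu 0,
 * shrinking mm_cpumask() to just { 0 }.  If it then migrates to cpu 1,
 * switch_mm() there sees cpu 1's bit clear, flushes the context from
 * cpu 1's tlb locally, and sets the bit; no cross call was needed.
 */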

/* This is currently used only by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;
	int cpu = get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

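	/* If we are the only user of this address space we can shrink
	 * mm_cpumask() to just ourselves and skip the cross call
	 * entirely; any cpu the task later migrates to will flush the
	 * stale context locally (see the invariant described above).
	 */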
	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      context, vaddr, 0,
				      mm_cpumask(mm));
	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}
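
/* Example (hypothetical caller): after tearing down a kernel virtual
 * mapping one would do
 *
 *	smp_flush_tlb_kernel_range(addr, addr + size);
 *
 * start/end are page-aligned here, so callers need not pre-align.
 */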

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_return(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}
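
/* Typical (hypothetical) usage around code that needs every other cpu
 * parked, e.g. before calling into OBP:
 *
 *	smp_capture();
 *	prom_do_something();
 *	smp_release();
 *
 * Captures nest safely since smp_capture_depth is a counter.
 */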

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

void __init smp_fill_in_cpu_possible_map(void)
{
	int possible_cpus = num_possible_cpus();
	int i;

	if (possible_cpus > nr_cpu_ids)
		possible_cpus = nr_cpu_ids;

	for (i = 0; i < possible_cpus; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);
}

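/* Derive the scheduler topology masks from the per-cpu data filled in
 * at boot: cpu_core_map groups cpus sharing a core_id,
 * cpu_core_sib_cache_map cpus sharing max_cache_id, cpu_core_sib_map
 * cpus sharing sock_id, and cpu_sibling_map hw threads sharing a
 * proc_id.
 */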
void smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		for_each_present_cpu(j) {
			if (cpu_data(i).max_cache_id ==
			    cpu_data(j).max_cache_id)
				cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);

			if (cpu_data(i).sock_id == cpu_data(j).sock_id)
				cpumask_set_cpu(j, &cpu_core_sib_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
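/* Park a dying cpu: detach its sun4v mondo queues, drop out of
 * smp_commenced_mask so __cpu_die() can observe the exit, then spin
 * with interrupts hard-disabled until the hypervisor or firmware
 * stops us.
 */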
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
	} else {
		xcall_deliver((u64) &xcall_receive_signal,
			      0, 0, cpumask_of(cpu));
	}
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

static void stop_this_cpu(void *dummy)
{
	prom_stopself();
}

void smp_send_stop(void)
{
	int cpu;

	if (tlb_type == hypervisor) {
		for_each_online_cpu(cpu) {
			if (cpu == smp_processor_id())
				continue;
#ifdef CONFIG_SUN_LDOMS
			if (ldom_domaining_enabled) {
				unsigned long hv_err;
				hv_err = sun4v_cpu_stop(cpu);
				if (hv_err)
					printk(KERN_ERR "sun4v_cpu_stop() "
					       "failed err=%lu\n", hv_err);
			} else
#endif
				prom_stopcpu_cpuid(cpu);
		}
	} else
		smp_call_function(stop_this_cpu, NULL, 0);
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}

static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

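/* Populate kernel page tables for a first-chunk percpu address; the
 * percpu allocator invokes this for every page it maps when the
 * page-mapped fallback (pcpu_page_first_chunk) is used.
 */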
static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd)) {
		pud_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pgd_populate(&init_mm, pgd, new);
	}

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}