/* leon_smp.c: Sparc-Leon SMP support.
 *
 * based on sun4m_smp.c
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/profile.h>
#include <linux/pm.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/clockchips.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq_regs.h>
#include <asm/traps.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cpudata.h>
#include <asm/asi.h>
#include <asm/leon.h>
#include <asm/leon_amba.h>
#include <asm/timer.h>

#include "kernel.h"

#include "irq.h"

extern ctxd_t *srmmu_ctx_table_phys;
static int smp_processors_ready;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpumask_t smp_commenced_mask;
void leon_configure_cache_smp(void);
static void leon_ipi_init(void);

/* IRQ number of LEON IPIs */
int leon_ipi_irq = LEON3_IRQ_IPI_DEFAULT;

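/* Atomically exchange *ptr with val. The swapa instruction used with the
 * ASI_LEON_DCACHE_MISS ASI forces the access past the data cache, so the
 * exchange is performed against memory and is visible to the other CPUs.
 */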
static inline unsigned long do_swap(volatile unsigned long *ptr,
                                    unsigned long val)
{
        __asm__ __volatile__("swapa [%2] %3, %0\n\t" : "=&r"(val)
                             : "0"(val), "r"(ptr), "i"(ASI_LEON_DCACHE_MISS)
                             : "memory");
        return val;
}

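/* Secondary CPU bring-up hooks. leon_cpu_pre_starting() runs on the freshly
 * started CPU to configure its caches; leon_cpu_pre_online() then reports
 * the CPU in to the master and spins until it is allowed to go online.
 */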
void leon_cpu_pre_starting(void *arg)
{
        leon_configure_cache_smp();
}

void leon_cpu_pre_online(void *arg)
{
        int cpuid = hard_smp_processor_id();

        /* Allow master to continue. The master will then give us the
         * go-ahead by setting the smp_commenced_mask and will wait without
         * timeouts until our setup is completed fully (signified by
         * our bit being set in the cpu_online_mask).
         */
        do_swap(&cpu_callin_map[cpuid], 1);

        local_ops->cache_all();
        local_ops->tlb_all();

        /* Fix idle thread fields. */
        __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid])
                             : "memory" /* paranoid */);

        /* Attach to the address space of init_task. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                mb();
}

/*
 * Cycle through the processors asking the PROM to start each one.
 */

extern struct linux_prom_registers smp_penguin_ctable;

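/* SMP cache setup: enable D-cache snooping when the hardware supports it.
 * Snooping only works with a 4k set size; with a larger set size, or when
 * the design was built without snoop support, the caches are disabled so
 * the CPUs stay coherent.
 */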
void leon_configure_cache_smp(void)
{
        unsigned long cfg = sparc_leon3_get_dcachecfg();
        int me = smp_processor_id();

        if (ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg) > 4) {
                printk(KERN_INFO "Note: SMP with snooping only works on 4k cache, found %dk(0x%x) on cpu %d, disabling caches\n",
                       (unsigned int)ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg),
                       (unsigned int)cfg, (unsigned int)me);
                sparc_leon3_disable_cache();
        } else {
                if (cfg & ASI_LEON3_SYSCTRL_CFG_SNOOPING) {
                        sparc_leon3_enable_snooping();
                } else {
                        printk(KERN_INFO "Note: You have to enable snooping in the vhdl model cpu %d, disabling caches\n",
                               me);
                        sparc_leon3_disable_cache();
                }
        }

        local_ops->cache_all();
        local_ops->tlb_all();
}

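/* Program the IRQMP broadcast register. IRQs whose bit is set in the mask
 * are delivered to all CPUs (used for the timer tick). If the interrupt
 * controller was synthesized without broadcast support, SMP cannot work on
 * more than one CPU.
 */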
static void leon_smp_setbroadcast(unsigned int mask)
{
        int broadcast =
            ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
              LEON3_IRQMPSTATUS_BROADCAST) & 1);
        if (!broadcast) {
                prom_printf("######## !!!! The irqmp-ctrl must have broadcast enabled, smp wont work !!!!! ####### nr cpus: %d\n",
                            leon_smp_nrcpus());
                if (leon_smp_nrcpus() > 1) {
                        BUG();
                } else {
                        prom_printf("continue anyway\n");
                        return;
                }
        }
        LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask);
}

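/* Number of CPUs in the system, read from the CPU-count field of the IRQMP
 * status register (the hardware field holds the count minus one).
 */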
int leon_smp_nrcpus(void)
{
        int nrcpu =
            ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
              LEON3_IRQMPSTATUS_CPUNR) & 0xf) + 1;
        return nrcpu;
}

void __init leon_boot_cpus(void)
{
        int nrcpu = leon_smp_nrcpus();
        int me = smp_processor_id();

        /* Setup IPI */
        leon_ipi_init();

        printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x\n", (unsigned int)me,
               (unsigned int)nrcpu, (unsigned int)NR_CPUS,
               (unsigned int)&(leon3_irqctrl_regs->mpstatus));

        leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me);
        leon_enable_irq_cpu(LEON3_IRQ_TICKER, me);
        leon_enable_irq_cpu(leon_ipi_irq, me);

        leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER);

        leon_configure_cache_smp();
        local_ops->cache_all();
}

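/* Boot one secondary CPU: point the trampoline's context-table descriptor at
 * the SRMMU context table, mask all of the new CPU's IRQs, release it from
 * power-down by writing its bit to the IRQMP mpstatus register, then poll
 * cpu_callin_map until it has checked in (or give up after about 2 seconds).
 */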
int leon_boot_one_cpu(int i, struct task_struct *idle)
{
        int timeout;

        current_set[i] = task_thread_info(idle);

        /* See trampoline.S:leon_smp_cpu_startup for details...
         * Initialize the contexts table
         * Since the call to prom_startcpu() trashes the structure,
         * we need to re-initialize it for each cpu
         */
        smp_penguin_ctable.which_io = 0;
        smp_penguin_ctable.phys_addr = (unsigned int)srmmu_ctx_table_phys;
        smp_penguin_ctable.reg_size = 0;

        /* whirrr, whirrr, whirrrrrrrrr... */
        printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i,
               (unsigned int)&leon3_irqctrl_regs->mpstatus);
        local_ops->cache_all();

        /* Make sure all IRQs are off from the start for this new CPU */
        LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0);

        /* Wake one CPU */
        LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i);

        /* wheee... it's going... */
        for (timeout = 0; timeout < 10000; timeout++) {
                if (cpu_callin_map[i])
                        break;
                udelay(200);
        }
        printk(KERN_INFO "Started CPU %d\n", (unsigned int)i);

        if (!(cpu_callin_map[i])) {
                printk(KERN_ERR "Processor %d is stuck.\n", i);
                return -ENODEV;
        } else {
                leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i);
                leon_enable_irq_cpu(LEON3_IRQ_TICKER, i);
                leon_enable_irq_cpu(leon_ipi_irq, i);
        }

        local_ops->cache_all();
        return 0;
}

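/* Called once all CPUs are up: build the circular cpu_data().next list used
 * for IRQ rotation and free the trap-table pages of CPUs that are not
 * present.
 */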
void __init leon_smp_done(void)
{
        int i, first;
        int *prev;

        /* setup cpu list for irq rotation */
        first = 0;
        prev = &first;
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_online(i)) {
                        *prev = i;
                        prev = &cpu_data(i).next;
                }
        }
        *prev = first;
        local_ops->cache_all();

        /* Free unneeded trap tables */
        if (!cpu_present(1)) {
                free_reserved_page(virt_to_page(&trapbase_cpu1));
        }
        if (!cpu_present(2)) {
                free_reserved_page(virt_to_page(&trapbase_cpu2));
        }
        if (!cpu_present(3)) {
                free_reserved_page(virt_to_page(&trapbase_cpu3));
        }

        /* Ok, they are spinning and ready to go. */
        smp_processors_ready = 1;
}

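/* Per-CPU IPI bookkeeping. Since a single IRQ carries all three IPI types,
 * the sender marks the pending type(s) in these flags and the receiving CPU
 * consumes them in leonsmp_ipi_interrupt().
 */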
struct leon_ipi_work {
        int single;
        int msk;
        int resched;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct leon_ipi_work, leon_ipi_work);

/* Initialize IPIs on the LEON. To save IRQ resources, only one IRQ is used
 * for all three types of IPIs.
 */
static void __init leon_ipi_init(void)
{
        int cpu, len;
        struct leon_ipi_work *work;
        struct property *pp;
        struct device_node *rootnp;
        struct tt_entry *trap_table;
        unsigned long flags;

        /* Find IPI IRQ or stick with default value */
        rootnp = of_find_node_by_path("/ambapp0");
        if (rootnp) {
                pp = of_find_property(rootnp, "ipi_num", &len);
                if (pp && (*(int *)pp->value))
                        leon_ipi_irq = *(int *)pp->value;
        }
        printk(KERN_INFO "leon: SMP IPIs at IRQ %d\n", leon_ipi_irq);

        /* Adjust so that we jump directly to smpleon_ipi */
        local_irq_save(flags);
        trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)];
        trap_table->inst_three += smpleon_ipi - real_irq_entry;
        local_ops->cache_all();
        local_irq_restore(flags);

        for_each_possible_cpu(cpu) {
                work = &per_cpu(leon_ipi_work, cpu);
                work->single = work->msk = work->resched = 0;
        }
}

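/* Raise IRQ 'level' on one CPU by writing its mask bit into that CPU's
 * IRQMP force register.
 */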
static void leon_send_ipi(int cpu, int level)
{
        unsigned long mask;

        mask = leon_get_irqmask(level);
        LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask);
}

static void leon_ipi_single(int cpu)
{
        struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);

        /* Mark work */
        work->single = 1;

        /* Generate IRQ on the CPU */
        leon_send_ipi(cpu, leon_ipi_irq);
}

static void leon_ipi_mask_one(int cpu)
{
        struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);

        /* Mark work */
        work->msk = 1;

        /* Generate IRQ on the CPU */
        leon_send_ipi(cpu, leon_ipi_irq);
}

static void leon_ipi_resched(int cpu)
{
        struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);

        /* Mark work */
        work->resched = 1;

        /* Generate IRQ on the CPU (any IRQ will cause resched) */
        leon_send_ipi(cpu, leon_ipi_irq);
}

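/* IPI trap handler: clear each pending work flag for this CPU and dispatch
 * to the corresponding generic SMP handler.
 */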
void leonsmp_ipi_interrupt(void)
{
        struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work);

        if (work->single) {
                work->single = 0;
                smp_call_function_single_interrupt();
        }
        if (work->msk) {
                work->msk = 0;
                smp_call_function_interrupt();
        }
        if (work->resched) {
                work->resched = 0;
                smp_resched_interrupt();
        }
}

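/* Cross-call descriptor shared by all CPUs. The processors_in/processors_out
 * arrays implement the entry/exit handshake between the caller and the
 * target CPUs. The structure is 8-byte aligned because leon_cross_call()
 * fills it with std (doubleword) stores.
 */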
static struct smp_funcall {
        smpfunc_t func;
        unsigned long arg1;
        unsigned long arg2;
        unsigned long arg3;
        unsigned long arg4;
        unsigned long arg5;
        unsigned long processors_in[NR_CPUS];  /* Set when ipi entered. */
        unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info __attribute__((aligned(8)));

static DEFINE_SPINLOCK(cross_call_lock);

/* Cross calls must be serialized, at least currently. */
static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
                            unsigned long arg2, unsigned long arg3,
                            unsigned long arg4)
{
        if (smp_processors_ready) {
                register int high = NR_CPUS - 1;
                unsigned long flags;

                spin_lock_irqsave(&cross_call_lock, flags);

                {
                        /* If you make changes here, make sure gcc generates proper code... */
                        register smpfunc_t f asm("i0") = func;
                        register unsigned long a1 asm("i1") = arg1;
                        register unsigned long a2 asm("i2") = arg2;
                        register unsigned long a3 asm("i3") = arg3;
                        register unsigned long a4 asm("i4") = arg4;
                        register unsigned long a5 asm("i5") = 0;

                        __asm__ __volatile__("std %0, [%6]\n\t"
                                             "std %2, [%6 + 8]\n\t"
                                             "std %4, [%6 + 16]\n\t" : :
                                             "r"(f), "r"(a1), "r"(a2), "r"(a3),
                                             "r"(a4), "r"(a5),
                                             "r"(&ccall_info.func));
                }

                /* Init receive/complete mapping, plus fire the IPI's off. */
                {
                        register int i;

                        cpumask_clear_cpu(smp_processor_id(), &mask);
                        cpumask_and(&mask, cpu_online_mask, &mask);
                        for (i = 0; i <= high; i++) {
                                if (cpumask_test_cpu(i, &mask)) {
                                        ccall_info.processors_in[i] = 0;
                                        ccall_info.processors_out[i] = 0;
                                        leon_send_ipi(i, LEON3_IRQ_CROSS_CALL);
                                }
                        }
                }

                {
                        register int i;

                        i = 0;
                        do {
                                if (!cpumask_test_cpu(i, &mask))
                                        continue;

                                while (!ccall_info.processors_in[i])
                                        barrier();
                        } while (++i <= high);

                        i = 0;
                        do {
                                if (!cpumask_test_cpu(i, &mask))
                                        continue;

                                while (!ccall_info.processors_out[i])
                                        barrier();
                        } while (++i <= high);
                }

                spin_unlock_irqrestore(&cross_call_lock, flags);
        }
}

/* Running cross calls. */
void leon_cross_call_irq(void)
{
        int i = smp_processor_id();

        ccall_info.processors_in[i] = 1;
        ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
                        ccall_info.arg4, ccall_info.arg5);
        ccall_info.processors_out[i] = 1;
}

static const struct sparc32_ipi_ops leon_ipi_ops = {
        .cross_call = leon_cross_call,
        .resched = leon_ipi_resched,
        .single = leon_ipi_single,
        .mask_one = leon_ipi_mask_one,
};

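/* Early SMP init: retarget the ipi15 trap-table entry from the sun4m handler
 * to the LEON variant and install the LEON IPI operations.
 */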
void __init leon_init_smp(void)
{
        /* Patch ipi15 trap table */
        t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_leon - linux_trap_ipi15_sun4m);

        sparc32_ipi_ops = &leon_ipi_ops;
}