/*
 * linux/arch/arm/kernel/smp.c
 *
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/ftrace.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we need
 * some other way of telling a new secondary core where to place its
 * SVC stack.
 */
struct secondary_data secondary_data;
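
/*
 * Illustrative note (not in the original source): secondary_data is
 * declared in <asm/smp.h>; in kernels of this vintage it looks roughly
 * like the sketch below, letting the boot CPU hand the incoming core
 * its initial stack pointer and page-table base before the MMU is on.
 *
 *	struct secondary_data {
 *		unsigned long pgdir;
 *		unsigned long swapper_pg_dir;
 *		void *stack;
 *	};
 */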

enum ipi_msg_type {
        IPI_TIMER = 2,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};

int __cpuinit __cpu_up(unsigned int cpu)
{
        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
        struct task_struct *idle = ci->idle;
        pgd_t *pgd;
        int ret;

        /*
         * Spawn a new process manually, if not already done.
         * Grab a pointer to its task struct so we can mess with it
         */
        if (!idle) {
                idle = fork_idle(cpu);
                if (IS_ERR(idle)) {
                        printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
                        return PTR_ERR(idle);
                }
                ci->idle = idle;
        } else {
                /*
                 * Since this idle thread is being re-used, call
                 * init_idle() to reinitialize the thread structure.
                 */
                init_idle(idle, cpu);
        }

        /*
         * Allocate initial page tables to allow the new CPU to
         * enable the MMU safely.  This essentially means a set
         * of our "standard" page tables, with the addition of
         * a 1:1 mapping for the physical address of the kernel.
         */
        pgd = pgd_alloc(&init_mm);
        if (!pgd)
                return -ENOMEM;

        if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
                identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
                identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
                identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
        }

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(pgd);
        secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
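        /*
         * The incoming CPU starts with its MMU and caches disabled, so
         * the freshly written secondary_data must be cleaned out of the
         * data cache (and any outer cache) to main memory, where the
         * secondary can see it.
         */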
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                unsigned long timeout;

                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                timeout = jiffies + HZ;
                while (time_before(jiffies, timeout)) {
                        if (cpu_online(cpu))
                                break;

                        udelay(10);
                        barrier();
                }

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
                identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
                identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
                identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
        }

        pgd_free(&init_mm, pgd);

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        struct task_struct *p;
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        percpu_timer_stop();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        read_lock(&tasklist_lock);
        for_each_process(p) {
                if (p->mm)
                        cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
        }
        read_unlock(&tasklist_lock);

        return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or the wait times out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();
        mb();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        complete(&cpu_died);

        /*
         * The actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.
         */
        platform_cpu_die(cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
        __asm__("mov    sp, %0\n"
        "       mov     fp, #0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        printk("CPU%u: Booted secondary processor\n", cpu);

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        cpu_init();
        preempt_disable();
        trace_hardirqs_off();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        platform_secondary_init(cpu);

        /*
         * Enable local interrupts.
         */
        notify_cpu_starting(cpu);
        local_irq_enable();
        local_fiq_enable();

        /*
         * Set up the percpu timer for this CPU.
         */
        percpu_timer_setup();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue.
         */
        set_cpu_online(cpu, true);
        while (!cpu_active(cpu))
                cpu_relax();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        per_cpu(cpu_data, cpu).idle = current;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int ncores = num_possible_cpus();

        smp_store_cpu_info(smp_processor_id());

        /*
         * Are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;
        if (ncores > 1 && max_cpus) {
                /*
                 * Enable the local timer or broadcast device for the
                 * boot CPU, but only if we have more than one CPU.
                 */
                percpu_timer_setup();

                /*
                 * Initialise the present map, which describes the set of CPUs
                 * actually populated at the present time.  A platform should
                 * re-initialize the map in platform_smp_prepare_cpus() if
                 * present != possible (e.g. physical hotplug).
                 */
                init_cpu_present(&cpu_possible_map);

                /*
                 * Initialise the SCU if there is more than one CPU
                 * and let them know where to start.
                 */
                platform_smp_prepare_cpus(max_cpus);
        }
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        smp_cross_call = fn;
}
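
/*
 * Illustrative sketch (not part of the original file): a platform whose
 * CPUs signal each other through the ARM GIC would typically register
 * the GIC's softirq trigger while probing its CPU topology, e.g. from
 * its smp_init_cpus() implementation (my_smp_init_cpus is hypothetical;
 * gic_raise_softirq() is the GIC helper of this era):
 *
 *	void __init my_smp_init_cpus(void)
 *	{
 *		... discover and mark possible CPUs ...
 *		set_smp_cross_call(gic_raise_softirq);
 *	}
 */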

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)  [x - IPI_TIMER] = s
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
};
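
/*
 * Note (added for clarity): the S() macro rebases the table at
 * IPI_TIMER (== 2), so ipi_types[] and the per-cpu ipi_irqs[] counters
 * are both indexed by "ipinr - IPI_TIMER", matching the accounting
 * done in do_IPI() below.
 */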

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

                for_each_present_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));

                seq_printf(p, " %s\n", ipi_types[i]);
        }
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

#ifdef CONFIG_LOCAL_TIMERS
        sum += __get_irq_stat(cpu, local_timer_irqs);
#endif

        return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
        irq_enter();
        evt->event_handler(evt);
        irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        int cpu = smp_processor_id();

        if (local_timer_ack()) {
                __inc_irq_stat(cpu, local_timer_irqs);
                ipi_timer();
        }

        set_irq_regs(old_regs);
}

void show_local_irqs(struct seq_file *p, int prec)
{
        unsigned int cpu;

        seq_printf(p, "%*s: ", prec, "LOC");

        for_each_present_cpu(cpu)
                seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));

        seq_printf(p, " Local timer interrupts\n");
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast     NULL
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
        struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
        evt->name       = "dummy_timer";
        evt->features   = CLOCK_EVT_FEAT_ONESHOT |
                          CLOCK_EVT_FEAT_PERIODIC |
                          CLOCK_EVT_FEAT_DUMMY;
        evt->rating     = 400;
        evt->mult       = 1;
        evt->set_mode   = broadcast_timer_set_mode;

        clockevents_register_device(evt);
}
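
/*
 * Note (added for clarity): when a CPU has no usable hardware local
 * timer, the dummy clockevent registered above makes the generic tick
 * layer fall back to broadcast mode, so this CPU's ticks arrive via
 * smp_timer_broadcast() and the IPI_TIMER message instead.
 */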

void __cpuinit percpu_timer_setup(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->cpumask = cpumask_of(cpu);
        evt->broadcast = smp_timer_broadcast;

        if (local_timer_setup(evt))
                broadcast_timer_setup(evt);
}
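
/*
 * Illustrative sketch (not part of the original file): on platforms
 * with a per-CPU TWD timer, local_timer_setup() is provided by platform
 * code and usually just hands the clockevent to the TWD driver, along
 * the lines of contemporaneous mach-realview code (IRQ_LOCALTIMER and
 * twd_timer_setup() are assumptions from that code base):
 *
 *	int __cpuinit local_timer_setup(struct clock_event_device *evt)
 *	{
 *		evt->irq = IRQ_LOCALTIMER;
 *		twd_timer_setup(evt);
 *		return 0;
 *	}
 */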

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
}
#endif

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
                spin_lock(&stop_lock);
                printk(KERN_CRIT "CPU%u: stopping\n", cpu);
                dump_stack();
                spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
                __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

        switch (ipinr) {
        case IPI_TIMER:
                ipi_timer();
                break;

        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                generic_smp_call_function_interrupt();
                break;

        case IPI_CALL_FUNC_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;

        case IPI_CPU_STOP:
                ipi_cpu_stop(cpu);
                break;

        default:
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
                break;
        }
        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
        unsigned long timeout;

        if (num_online_cpus() > 1) {
                cpumask_t mask = cpu_online_map;
                cpu_clear(smp_processor_id(), mask);

                smp_cross_call(&mask, IPI_CPU_STOP);
        }

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
/*
 * linux/arch/arm/kernel/smp.c
 *
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we need
 * some other way of telling a new secondary core where to place its
 * SVC stack.
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
        IPI_TIMER = 2,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};

static DECLARE_COMPLETION(cpu_running);
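
/*
 * cpu_running is completed by the incoming CPU in
 * secondary_start_kernel() once it has marked itself online;
 * __cpu_up() below waits on it instead of polling jiffies.
 */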

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(idmap_pgd);
        secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        percpu_timer_stop();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        clear_tasks_mm_cpumask(cpu);

        return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or the wait times out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();
        mb();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        complete(&cpu_died);

        /*
         * The actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.
         */
        platform_cpu_die(cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
        __asm__("mov    sp, %0\n"
        "       mov     fp, #0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;

        store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        printk("CPU%u: Booted secondary processor\n", cpu);

        cpu_init();
        preempt_disable();
        trace_hardirqs_off();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        platform_secondary_init(cpu);

        notify_cpu_starting(cpu);

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue - which happens after __cpu_up returns.
         */
        set_cpu_online(cpu, true);
        complete(&cpu_running);

        /*
         * Set up the percpu timer for this CPU.
         */
        percpu_timer_setup();

        local_irq_enable();
        local_fiq_enable();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
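        /*
         * Nothing left to do here: the idle thread is now managed by
         * generic code, which passes each secondary's idle task
         * straight into __cpu_up() above.
         */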
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int ncores = num_possible_cpus();

        init_cpu_topology();

        smp_store_cpu_info(smp_processor_id());

        /*
         * Are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;
        if (ncores > 1 && max_cpus) {
                /*
                 * Enable the local timer or broadcast device for the
                 * boot CPU, but only if we have more than one CPU.
                 */
                percpu_timer_setup();

                /*
                 * Initialise the present map, which describes the set of CPUs
                 * actually populated at the present time.  A platform should
                 * re-initialize the map in platform_smp_prepare_cpus() if
                 * present != possible (e.g. physical hotplug).
                 */
                init_cpu_present(cpu_possible_mask);

                /*
                 * Initialise the SCU if there is more than one CPU
                 * and let them know where to start.
                 */
                platform_smp_prepare_cpus(max_cpus);
        }
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)  [x - IPI_TIMER] = s
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

                for_each_present_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));

                seq_printf(p, " %s\n", ipi_types[i]);
        }
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

        return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
        evt->event_handler(evt);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast     NULL
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
        struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
        evt->name       = "dummy_timer";
        evt->features   = CLOCK_EVT_FEAT_ONESHOT |
                          CLOCK_EVT_FEAT_PERIODIC |
                          CLOCK_EVT_FEAT_DUMMY;
        evt->rating     = 400;
        evt->mult       = 1;
        evt->set_mode   = broadcast_timer_set_mode;

        clockevents_register_device(evt);
}
422static struct local_timer_ops *lt_ops;
423
424#ifdef CONFIG_LOCAL_TIMERS
425int local_timer_register(struct local_timer_ops *ops)
426{
427 if (!is_smp() || !setup_max_cpus)
428 return -ENXIO;
429
430 if (lt_ops)
431 return -EBUSY;
432
433 lt_ops = ops;
434 return 0;
435}
436#endif
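
/*
 * Illustrative sketch (not part of the original file): a local-timer
 * driver registers its ops once at boot.  The ARM TWD driver of this
 * era does approximately the following (names taken from
 * contemporaneous smp_twd.c and stated here as assumptions):
 *
 *	static struct local_timer_ops twd_lt_ops __cpuinitdata = {
 *		.setup  = twd_timer_setup,
 *		.stop   = twd_timer_stop,
 *	};
 *
 *	... local_timer_register(&twd_lt_ops) ...
 */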

static void __cpuinit percpu_timer_setup(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->cpumask = cpumask_of(cpu);
        evt->broadcast = smp_timer_broadcast;

        if (!lt_ops || lt_ops->setup(evt))
                broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        if (lt_ops)
                lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
                raw_spin_lock(&stop_lock);
                printk(KERN_CRIT "CPU%u: stopping\n", cpu);
                dump_stack();
                raw_spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
        handle_IPI(ipinr, regs);
}

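/*
 * Note (added for clarity): handle_IPI() is split out from do_IPI() so
 * that irqchip-level entry code can demultiplex IPIs itself; for
 * instance, GIC entry code of this period calls handle_IPI() directly
 * for hardware interrupt IDs 0-15 (an assumption based on
 * contemporaneous GIC code, not stated in this file).
 */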
void handle_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
                __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

        switch (ipinr) {
        case IPI_TIMER:
                irq_enter();
                ipi_timer();
                irq_exit();
                break;

        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                irq_enter();
                generic_smp_call_function_interrupt();
                irq_exit();
                break;

        case IPI_CALL_FUNC_SINGLE:
                irq_enter();
                generic_smp_call_function_single_interrupt();
                irq_exit();
                break;

        case IPI_CPU_STOP:
                irq_enter();
                ipi_cpu_stop(cpu);
                irq_exit();
                break;

        default:
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
                break;
        }
        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_HOTPLUG_CPU
static void smp_kill_cpus(cpumask_t *mask)
{
        unsigned int cpu;
        for_each_cpu(cpu, mask)
                platform_cpu_kill(cpu);
}
#else
static void smp_kill_cpus(cpumask_t *mask) { }
#endif

void smp_send_stop(void)
{
        unsigned long timeout;
        struct cpumask mask;

        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
        if (!cpumask_empty(&mask))
                smp_cross_call(&mask, IPI_CPU_STOP);

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warning("SMP: failed to stop secondary CPUs\n");

        smp_kill_cpus(&mask);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}