// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	/*
	 * IPI_CPU_BACKTRACE is special and not included in NR_IPI
	 * or traceable with trace_ipi_*
	 */
	IPI_CPU_BACKTRACE,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;

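/*
 * Install the platform's SMP operations.  Typically called once during
 * early boot (e.g. from the machine descriptor or the PSCI setup code)
 * before any secondary CPU is brought up.
 */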
void __init smp_set_ops(const struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}

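/*
 * Convert swapper_pg_dir into the form the secondary boot path expects:
 * with LPAE the physical address may not fit in 32 bits, so pass a PFN
 * rather than the raw physical address.
 */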
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(virt_to_phys(pgd));
#else
	return virt_to_phys(pgd);
#endif
}

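/*
 * On big.LITTLE systems with branch predictor hardening, different CPU
 * types may need different Spectre workarounds, so each CPU gets its own
 * processor vtable: allocate it before the CPU is booted, and have the
 * secondary point it at the entry matching its own CPUID once it runs.
 */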
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
static int secondary_biglittle_prepare(unsigned int cpu)
{
	if (!cpu_vtable[cpu])
		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);

	return cpu_vtable[cpu] ? 0 : -ENOMEM;
}

static void secondary_biglittle_init(void)
{
	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
}
#else
static int secondary_biglittle_prepare(unsigned int cpu)
{
	return 0;
}

static void secondary_biglittle_init(void)
{
}
#endif

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	ret = secondary_biglittle_prepare(cpu);
	if (ret)
		return ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int platform_can_secondary_boot(void)
{
	return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu != 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
	remove_cpu_topology(cpu);
#endif

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	return 0;
}

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or the wait times out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_debug("CPU%u: shutdown\n", cpu);

	clear_tasks_mm_cpumask(cpu);
	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	(void)cpu_report_death();

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
	check_cpu_icache_size(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	secondary_biglittle_init();

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

#ifndef CONFIG_MMU
	setup_vectors_base();
#endif
	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);

	check_other_bugs();

	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_abt_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

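/*
 * The interrupt controller driver registers its low-level IPI "cross call"
 * via set_smp_cross_call() below; smp_cross_call() wraps that callback
 * with the IPI raise tracepoint.
 */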
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

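/*
 * Per-CPU IPI statistics: show_ipi_list() feeds /proc/interrupts, and
 * smp_irq_stat_cpu() provides the per-CPU total used for /proc/stat.
 */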
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

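/*
 * Kick irq_work on the local CPU by raising IPI_IRQ_WORK to ourselves.
 * When no IPI mechanism is available, the generic irq_work code instead
 * runs pending work from the next timer tick.
 */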
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state <= SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1) {
		cpu_relax();
		wfe();
	}
}

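/*
 * IPI_COMPLETION lets one CPU synchronously wait on another: the waiter
 * registers its completion here and asks the target CPU to raise the
 * returned IPI number, whose handler (ipi_complete) then completes it.
 */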
static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
		ipi_complete(cpu);
		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
		printk_nmi_enter();
		irq_enter();
		nmi_cpu_backtrace(regs);
		irq_exit();
		printk_nmi_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
			cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

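/*
 * Stop all other CPUs, e.g. for reboot or panic: raise IPI_CPU_STOP on
 * every other online CPU and give them up to one second to mark
 * themselves offline.
 */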
void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * If panic() is called on CPU1 and CPU2 at the same time, and CPU1 calls
 * panic_smp_self_stop() before CPU2 calls crash_smp_send_stop(), CPU1 can
 * no longer receive the stop IPI from CPU2 and would remain online forever,
 * so kdump would fail.  Hence this override of panic_smp_self_stop(), which
 * additionally calls set_cpu_online(smp_processor_id(), false).
 */
void panic_smp_self_stop(void)
{
	pr_debug("CPU %u will stop doing anything useful since another CPU has panicked\n",
		 smp_processor_id());
	set_cpu_online(smp_processor_id(), false);
	while (1)
		cpu_relax();
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

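/*
 * Rescale loops_per_jiffy on cpufreq transitions so udelay() stays roughly
 * calibrated: the global value and each affected CPU's copy are scaled from
 * reference values captured at the first transition.  Nothing to do when
 * the delay loop is frequency-invariant (CPUFREQ_CONST_LOOPS).
 */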
static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpumask *cpus = freq->policy->cpus;
	int cpu, first = cpumask_first(cpus);
	unsigned int lpj;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, first)) {
		for_each_cpu(cpu, cpus) {
			per_cpu(l_p_j_ref, cpu) =
				per_cpu(cpu_data, cpu).loops_per_jiffy;
			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		}

		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);

		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
				    per_cpu(l_p_j_ref_freq, first), freq->new);
		for_each_cpu(cpu, cpus)
			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
		CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

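/*
 * "NMI"-style CPU backtraces: IPI_CPU_BACKTRACE is raised directly via
 * __smp_cross_call() (bypassing the IPI tracepoints), and each target CPU
 * dumps its backtrace from the IPI handler.
 */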
static void raise_nmi(cpumask_t *mask)
{
	__smp_cross_call(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
}