// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/smp.c
 *
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
#include <linux/kernel_stat.h>

#include <linux/atomic.h>
#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#include <trace/events/ipi.h>

/*
 * As from 2.5, kernels no longer have an init_tasks structure, so we need
 * some other way of telling a new secondary core where to place its SVC
 * stack.
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	NR_IPI,
	/*
	 * CPU_BACKTRACE is special and not included in NR_IPI
	 * or traceable with trace_ipi_*
	 */
	IPI_CPU_BACKTRACE = NR_IPI,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */
	MAX_IPI
};

static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly = NR_IPI;
static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;

static void ipi_setup(int cpu);

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;

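/*
 * Record the platform's SMP operations. Called once during early boot,
 * before any secondary CPU is brought up; a NULL argument leaves the
 * default (empty) operations in place.
 */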
void __init smp_set_ops(const struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
};

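/*
 * Encode the swapper page table base for secondary_data: as a PFN when
 * LPAE is enabled (so it fits in an unsigned long), otherwise as the
 * physical address of the pgd itself.
 */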
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(virt_to_phys(pgd));
#else
	return virt_to_phys(pgd);
#endif
}

#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
static int secondary_biglittle_prepare(unsigned int cpu)
{
	if (!cpu_vtable[cpu])
		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);

	return cpu_vtable[cpu] ? 0 : -ENOMEM;
}

static void secondary_biglittle_init(void)
{
	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
}
#else
static int secondary_biglittle_prepare(unsigned int cpu)
{
	return 0;
}

static void secondary_biglittle_init(void)
{
}
#endif

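/*
 * Boot a secondary CPU: publish its initial stack, page tables and idle
 * task via secondary_data, ask the platform to start the core, and then
 * wait (up to one second) for it to mark itself online.
 */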
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	ret = secondary_biglittle_prepare(cpu);
	if (ret)
		return ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	secondary_data.task = idle;
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int platform_can_secondary_boot(void)
{
	return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu != 0;
}

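/*
 * Mask the per-CPU IPI interrupts on a CPU that is going down; the
 * complement of ipi_setup() below.
 */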
static void ipi_teardown(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		disable_percpu_irq(ipi_irq_base + i);
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
	remove_cpu_topology(cpu);
#endif

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);
	ipi_teardown(cpu);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	return 0;
}

/*
 * Called on the thread which asked for a CPU to be shut down, once the
 * shutdown has completed.
 */
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
	pr_debug("CPU%u: shutdown\n", cpu);

	clear_tasks_mm_cpumask(cpu);
	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU. Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __noreturn arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU. This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose
	 * of. Once this returns, power and/or clocks can be removed at
	 * any point from this CPU and its cache by platform_cpu_kill().
	 */
	cpuhp_ap_report_dead();

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out. This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific. This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU. These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation. There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	mov	r0, %1\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8),
		  "r" (current)
		: "r0");

	unreachable();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
	check_cpu_icache_size(cpuid);
}

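/*
 * Install @cur as this CPU's 'current' task pointer, which this kernel
 * keeps in the user-read-only thread ID register (TPIDRURO). A freshly
 * booted secondary must do this before anything dereferences current.
 */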
static void set_current(struct task_struct *cur)
{
	/* Set TPIDRURO */
	asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(struct task_struct *task)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	set_current(task);

	secondary_biglittle_init();

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

#ifndef CONFIG_MMU
	setup_vectors_base();
#endif
	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	ipi_setup(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);

	check_other_bugs();

	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_abt_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_WAKEUP]		= "CPU wakeup interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_COMPLETION]	= "completion interrupts",
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

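/*
 * /proc/interrupts support: print one row per IPI type, with the per-CPU
 * delivery counts taken from the underlying per-CPU interrupt descriptors.
 */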
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		if (!ipi_desc[i])
			continue;

		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	local_fiq_disable();

	if (system_state <= SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	while (1) {
		cpu_relax();
		wfe();
	}
}

static DEFINE_PER_CPU(struct completion *, cpu_completion);

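/*
 * Register a completion to be signalled from IPI context on @cpu, and
 * return the IPI number (IPI_COMPLETION) the caller should raise.
 */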
int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
static void do_handle_IPI(int ipinr)
{
	unsigned int cpu = smp_processor_id();

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_entry(ipi_types[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		tick_receive_broadcast();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop(cpu);
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_work_run();
		break;
#endif

	case IPI_COMPLETION:
		ipi_complete(cpu);
		break;

	case IPI_CPU_BACKTRACE:
		printk_deferred_enter();
		nmi_cpu_backtrace(get_irq_regs());
		printk_deferred_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
			cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit(ipi_types[ipinr]);
}

/* Legacy version, should go away once all irqchips have been converted */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	do_handle_IPI(ipinr);
	irq_exit();

	set_irq_regs(old_regs);
}

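/*
 * irqaction handler used when the IPIs are requested as ordinary per-CPU
 * interrupts; the IPI number is recovered from the Linux IRQ number.
 */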
static irqreturn_t ipi_handler(int irq, void *data)
{
	do_handle_IPI(irq - ipi_irq_base);
	return IRQ_HANDLED;
}

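/*
 * Raise an IPI on every CPU in @target by sending the corresponding
 * per-CPU interrupt through the irqchip layer.
 */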
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__ipi_send_mask(ipi_desc[ipinr], target);
}

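/* Enable the IPI interrupts on the calling CPU. */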
static void ipi_setup(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		enable_percpu_irq(ipi_irq_base + i, 0);
}

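/*
 * Called by the interrupt controller driver to hand the architecture a
 * block of @n per-CPU interrupts, starting at @ipi_base, to be used as
 * IPIs. The boot CPU's IPIs are enabled immediately; secondaries enable
 * theirs in secondary_start_kernel().
 */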
void __init set_smp_ipi_range(int ipi_base, int n)
{
	int i;

	WARN_ON(n < MAX_IPI);
	nr_ipi = min(n, MAX_IPI);

	for (i = 0; i < nr_ipi; i++) {
		int err;

		err = request_percpu_irq(ipi_base + i, ipi_handler,
					 "IPI", &irq_stat);
		WARN_ON(err);

		ipi_desc[i] = irq_to_desc(ipi_base + i);
		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
	}

	ipi_irq_base = ipi_base;

	/* Set up the boot CPU immediately */
	ipi_setup(smp_processor_id());
}

void arch_smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * If panic() is called on CPU1 and CPU2 at the same time, and CPU1 calls
 * panic_smp_self_stop() before CPU2 issues crash_smp_send_stop(), CPU1
 * can no longer receive the stop IPI from CPU2 and would stay online
 * forever, so kdump fails. Hence this architecture-specific
 * panic_smp_self_stop() also calls set_cpu_online(smp_processor_id(),
 * false) before spinning.
 */
void __noreturn panic_smp_self_stop(void)
{
	pr_debug("CPU %u will stop doing anything useful since another CPU has panicked\n",
		 smp_processor_id());
	set_cpu_online(smp_processor_id(), false);
	while (1)
		cpu_relax();
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

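/*
 * cpufreq transition notifier: rescale loops_per_jiffy (both the global
 * value and the per-CPU copies for the affected policy) so that udelay()
 * stays roughly calibrated across clock changes on platforms whose delay
 * loops are not constant-rate.
 */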
static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpumask *cpus = freq->policy->cpus;
	int cpu, first = cpumask_first(cpus);
	unsigned int lpj;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, first)) {
		for_each_cpu(cpu, cpus) {
			per_cpu(l_p_j_ref, cpu) =
				per_cpu(cpu_data, cpu).loops_per_jiffy;
			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		}

		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);

		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
				    per_cpu(l_p_j_ref_freq, first), freq->new);
		for_each_cpu(cpu, cpus)
			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call	= cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
		CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

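/*
 * Backtrace requests use the out-of-band IPI_CPU_BACKTRACE vector, which
 * sits outside NR_IPI and is therefore sent directly rather than via
 * smp_cross_call().
 */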
static void raise_nmi(cpumask_t *mask)
{
	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_nmi);
}