v4.6
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

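/*
 * Defining CREATE_TRACE_POINTS before including the trace header
 * instantiates the ipi_raise/ipi_entry/ipi_exit tracepoints declared
 * in <trace/events/ipi.h> in this file.
 */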
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	IPI_CPU_BACKTRACE,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(const struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
};

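/*
 * Return the page-table address in the form the secondary boot path
 * expects: with LPAE the physical address may not fit in 32 bits, so
 * hand over a PFN rather than the address itself.
 */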
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(virt_to_phys(pgd));
#else
	return virt_to_phys(pgd);
#endif
}

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int platform_can_secondary_boot(void)
{
	return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu != 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_abt_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};

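/* Fire the IPI tracepoint, then raise the IPI via the machine hook. */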
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

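/* Print the per-CPU counts for each IPI type (used for /proc/interrupts). */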
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

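/* Sum of all IPIs handled by @cpu, across every IPI type. */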
u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

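/*
 * IPI_COMPLETION support: a waiter registers a completion against the
 * target CPU and raises the returned IPI number at it; the handler
 * below completes the registered completion when the IPI arrives.
 */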
static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
		ipi_complete(cpu);
		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
		irq_enter();
		nmi_cpu_backtrace(regs);
		irq_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
		        cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

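/*
 * Keep loops_per_jiffy (and therefore udelay()) roughly calibrated
 * across DVFS transitions: scale it up before a frequency increase
 * and down after a decrease, relative to the reference values
 * captured on the first transition.
 */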
static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
						CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

static void raise_nmi(cpumask_t *mask)
{
	/*
	 * Generate the backtrace directly if we are running in a calling
	 * context that is not preemptible by the backtrace IPI. Note
	 * that nmi_cpu_backtrace() automatically removes the current cpu
	 * from mask.
	 */
	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
		nmi_cpu_backtrace(NULL);

	smp_cross_call(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
}
v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	/*
	 * CPU_BACKTRACE is special and not included in NR_IPI
	 * or traceable with trace_ipi_*
	 */
	IPI_CPU_BACKTRACE,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;

void __init smp_set_ops(const struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
};

static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(virt_to_phys(pgd));
#else
	return virt_to_phys(pgd);
#endif
}

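/*
 * With branch-predictor hardening on big.LITTLE, each CPU gets its own
 * processor vtable so the workaround matching its core type is used.
 * The table is allocated while bringing the CPU up (when kzalloc() is
 * usable) and installed early in the secondary's boot path.
 */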
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
static int secondary_biglittle_prepare(unsigned int cpu)
{
	if (!cpu_vtable[cpu])
		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);

	return cpu_vtable[cpu] ? 0 : -ENOMEM;
}

static void secondary_biglittle_init(void)
{
	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
}
#else
static int secondary_biglittle_prepare(unsigned int cpu)
{
	return 0;
}

static void secondary_biglittle_init(void)
{
}
#endif

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	ret = secondary_biglittle_prepare(cpu);
	if (ret)
		return ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int platform_can_secondary_boot(void)
{
	return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu != 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_debug("CPU%u: shutdown\n", cpu);

	clear_tasks_mm_cpumask(cpu);
	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	(void)cpu_report_death();

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
	check_cpu_icache_size(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	secondary_biglittle_init();

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

#ifndef CONFIG_MMU
	setup_vectors_base();
#endif
	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);

	check_other_bugs();

	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_abt_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state <= SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1) {
		cpu_relax();
		wfe();
	}
}

static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
		ipi_complete(cpu);
		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
		printk_nmi_enter();
		irq_enter();
		nmi_cpu_backtrace(regs);
		irq_exit();
		printk_nmi_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
		        cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * If panic() is called on two CPUs at the same time and the first CPU
 * reaches panic_smp_self_stop() before the second calls
 * crash_smp_send_stop(), the first CPU can no longer receive the stop
 * IPI, stays marked online forever, and kdump fails. So mark this CPU
 * offline with set_cpu_online(smp_processor_id(), false) before
 * spinning.
 */
void panic_smp_self_stop(void)
{
	pr_debug("CPU %u will stop doing anything useful since another CPU has panicked\n",
	         smp_processor_id());
	set_cpu_online(smp_processor_id(), false);
	while (1)
		cpu_relax();
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

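/*
 * CPUs in one cpufreq policy change frequency together, so the scaled
 * loops_per_jiffy is computed once from the policy's first CPU and
 * then applied to every CPU in the policy's mask.
 */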
static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpumask *cpus = freq->policy->cpus;
	int cpu, first = cpumask_first(cpus);
	unsigned int lpj;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, first)) {
		for_each_cpu(cpu, cpus) {
			per_cpu(l_p_j_ref, cpu) =
				per_cpu(cpu_data, cpu).loops_per_jiffy;
			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		}

		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);

		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
				    per_cpu(l_p_j_ref_freq, first), freq->new);
		for_each_cpu(cpu, cpus)
			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
						CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

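/*
 * IPI_CPU_BACKTRACE sits outside NR_IPI, so raise it through
 * __smp_cross_call() directly and bypass the tracepoint in
 * smp_cross_call().
 */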
static void raise_nmi(cpumask_t *mask)
{
	__smp_cross_call(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
}