v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  linux/arch/arm/kernel/smp.c
  4 *
  5 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
  6 */
  7#include <linux/module.h>
  8#include <linux/delay.h>
  9#include <linux/init.h>
 10#include <linux/spinlock.h>
 11#include <linux/sched/mm.h>
 12#include <linux/sched/hotplug.h>
 13#include <linux/sched/task_stack.h>
 14#include <linux/interrupt.h>
 15#include <linux/cache.h>
 16#include <linux/profile.h>
 17#include <linux/errno.h>
 18#include <linux/mm.h>
 19#include <linux/err.h>
 20#include <linux/cpu.h>
 21#include <linux/seq_file.h>
 22#include <linux/irq.h>
 23#include <linux/nmi.h>
 24#include <linux/percpu.h>
 25#include <linux/clockchips.h>
 26#include <linux/completion.h>
 27#include <linux/cpufreq.h>
 28#include <linux/irq_work.h>
 29
 30#include <linux/atomic.h>
 31#include <asm/bugs.h>
 32#include <asm/smp.h>
 33#include <asm/cacheflush.h>
 34#include <asm/cpu.h>
 35#include <asm/cputype.h>
 36#include <asm/exception.h>
 37#include <asm/idmap.h>
 38#include <asm/topology.h>
 39#include <asm/mmu_context.h>
 40#include <asm/procinfo.h>
 41#include <asm/processor.h>
 42#include <asm/sections.h>
 43#include <asm/tlbflush.h>
 44#include <asm/ptrace.h>
 45#include <asm/smp_plat.h>
 46#include <asm/virt.h>
 47#include <asm/mach/arch.h>
 48#include <asm/mpu.h>
 49
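/* Instantiate the IPI tracepoint definitions declared in <trace/events/ipi.h>. */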
 50#define CREATE_TRACE_POINTS
 51#include <trace/events/ipi.h>
 52
 53/*
 54 * as from 2.5, kernels no longer have an init_tasks structure
 55 * so we need some other way of telling a new secondary core
 56 * where to place its SVC stack
 57 */
 58struct secondary_data secondary_data;
 59
 60enum ipi_msg_type {
 61	IPI_WAKEUP,
 62	IPI_TIMER,
 63	IPI_RESCHEDULE,
 64	IPI_CALL_FUNC,
 65	IPI_CPU_STOP,
 66	IPI_IRQ_WORK,
 67	IPI_COMPLETION,
 68	/*
 69	 * CPU_BACKTRACE is special and not included in NR_IPI
 70	 * or traceable with trace_ipi_*
 71	 */
 72	IPI_CPU_BACKTRACE,
 73	/*
 74	 * SGI8-15 can be reserved by secure firmware, and thus may
 75	 * not be usable by the kernel. Please keep the above limited
 76	 * to at most 8 entries.
 77	 */
 78};
 79
 80static DECLARE_COMPLETION(cpu_running);
 81
 82static struct smp_operations smp_ops __ro_after_init;
 83
 84void __init smp_set_ops(const struct smp_operations *ops)
 85{
 86	if (ops)
 87		smp_ops = *ops;
 88};
 89
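/*
 * With LPAE the page tables may sit above the 32-bit physical boundary, so
 * secondary_data carries the swapper pgd as a PFN; without LPAE the physical
 * address itself fits in an unsigned long.
 */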
 90static unsigned long get_arch_pgd(pgd_t *pgd)
 91{
 92#ifdef CONFIG_ARM_LPAE
 93	return __phys_to_pfn(virt_to_phys(pgd));
 94#else
 95	return virt_to_phys(pgd);
 96#endif
 97}
 98
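/*
 * On big.LITTLE systems with branch predictor hardening, the core types may
 * need different Spectre-v2 workarounds, so each CPU gets its own processor
 * vtable: allocated from __cpu_up() before the secondary is started, then
 * pointed at the proc_info matching the secondary's own MIDR once it boots.
 */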
 99#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
100static int secondary_biglittle_prepare(unsigned int cpu)
101{
102	if (!cpu_vtable[cpu])
103		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
104
105	return cpu_vtable[cpu] ? 0 : -ENOMEM;
106}
107
108static void secondary_biglittle_init(void)
109{
110	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
111}
112#else
113static int secondary_biglittle_prepare(unsigned int cpu)
114{
115	return 0;
116}
117
118static void secondary_biglittle_init(void)
119{
120}
121#endif
122
123int __cpu_up(unsigned int cpu, struct task_struct *idle)
124{
125	int ret;
126
127	if (!smp_ops.smp_boot_secondary)
128		return -ENOSYS;
129
130	ret = secondary_biglittle_prepare(cpu);
131	if (ret)
132		return ret;
133
134	/*
135	 * We need to tell the secondary core where to find
136	 * its stack and the page tables.
137	 */
138	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
139#ifdef CONFIG_ARM_MPU
140	secondary_data.mpu_rgn_info = &mpu_rgn_info;
141#endif
142
143#ifdef CONFIG_MMU
144	secondary_data.pgdir = virt_to_phys(idmap_pgd);
145	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
146#endif
147	sync_cache_w(&secondary_data);
148
149	/*
150	 * Now bring the CPU into our world.
151	 */
152	ret = smp_ops.smp_boot_secondary(cpu, idle);
153	if (ret == 0) {
154		/*
155		 * CPU was successfully started, wait for it
156		 * to come online or time out.
157		 */
158		wait_for_completion_timeout(&cpu_running,
159						 msecs_to_jiffies(1000));
160
161		if (!cpu_online(cpu)) {
162			pr_crit("CPU%u: failed to come online\n", cpu);
163			ret = -EIO;
164		}
165	} else {
166		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
167	}
168
169
170	memset(&secondary_data, 0, sizeof(secondary_data));
171	return ret;
172}
173
174/* platform specific SMP operations */
175void __init smp_init_cpus(void)
176{
177	if (smp_ops.smp_init_cpus)
178		smp_ops.smp_init_cpus();
179}
180
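/* Query helpers: what does the registered smp_ops back-end support? */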
181int platform_can_secondary_boot(void)
182{
183	return !!smp_ops.smp_boot_secondary;
184}
185
186int platform_can_cpu_hotplug(void)
187{
188#ifdef CONFIG_HOTPLUG_CPU
189	if (smp_ops.cpu_kill)
190		return 1;
191#endif
192
193	return 0;
194}
195
196#ifdef CONFIG_HOTPLUG_CPU
197static int platform_cpu_kill(unsigned int cpu)
198{
199	if (smp_ops.cpu_kill)
200		return smp_ops.cpu_kill(cpu);
201	return 1;
202}
203
204static int platform_cpu_disable(unsigned int cpu)
205{
206	if (smp_ops.cpu_disable)
207		return smp_ops.cpu_disable(cpu);
208
209	return 0;
210}
211
212int platform_can_hotplug_cpu(unsigned int cpu)
213{
214	/* cpu_die must be specified to support hotplug */
215	if (!smp_ops.cpu_die)
216		return 0;
217
218	if (smp_ops.cpu_can_disable)
219		return smp_ops.cpu_can_disable(cpu);
220
221	/*
222	 * By default, allow disabling all CPUs except the first one,
223	 * since this is special on a lot of platforms, e.g. because
224	 * of clock tick interrupts.
225	 */
226	return cpu != 0;
227}
228
229/*
230 * __cpu_disable runs on the processor to be shut down.
231 */
232int __cpu_disable(void)
233{
234	unsigned int cpu = smp_processor_id();
235	int ret;
236
237	ret = platform_cpu_disable(cpu);
238	if (ret)
239		return ret;
240
241#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
242	remove_cpu_topology(cpu);
243#endif
244
245	/*
246	 * Take this CPU offline.  Once we clear this, we can't return,
247	 * and we must not schedule until we're ready to give up the cpu.
248	 */
249	set_cpu_online(cpu, false);
250
251	/*
252	 * OK - migrate IRQs away from this CPU
253	 */
254	irq_migrate_all_off_this_cpu();
255
256	/*
257	 * Flush user cache and TLB mappings, and then remove this CPU
258	 * from the vm mask set of all processes.
259	 *
260	 * Caches are flushed to the Level of Unification Inner Shareable
261	 * to write-back dirty lines to unified caches shared by all CPUs.
262	 */
263	flush_cache_louis();
264	local_flush_tlb_all();
265
266	return 0;
267}
268
269/*
270 * called on the thread which is asking for a CPU to be shut down -
271 * waits until shutdown has completed, or until it times out.
272 */
273void __cpu_die(unsigned int cpu)
274{
275	if (!cpu_wait_death(cpu, 5)) {
276		pr_err("CPU%u: cpu didn't die\n", cpu);
277		return;
278	}
279	pr_debug("CPU%u: shutdown\n", cpu);
280
281	clear_tasks_mm_cpumask(cpu);
282	/*
283	 * platform_cpu_kill() is generally expected to do the powering off
284	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
285	 * be done by the CPU which is dying in preference to supporting
286	 * this call, but that means there is _no_ synchronisation between
287	 * the requesting CPU and the dying CPU actually losing power.
288	 */
289	if (!platform_cpu_kill(cpu))
290		pr_err("CPU%u: unable to kill\n", cpu);
291}
292
293/*
294 * Called from the idle thread for the CPU which has been shutdown.
295 *
296 * Note that we disable IRQs here, but do not re-enable them
297 * before returning to the caller. This is also the behaviour
298 * of the other hotplug-cpu capable cores, so presumably coming
299 * out of idle fixes this.
300 */
301void arch_cpu_idle_dead(void)
302{
303	unsigned int cpu = smp_processor_id();
304
305	idle_task_exit();
306
307	local_irq_disable();
308
309	/*
310	 * Flush the data out of the L1 cache for this CPU.  This must be
311	 * before the completion to ensure that data is safely written out
312	 * before platform_cpu_kill() gets called - which may disable
313	 * *this* CPU and power down its cache.
314	 */
315	flush_cache_louis();
316
317	/*
318	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
319	 * this returns, power and/or clocks can be removed at any point
320	 * from this CPU and its cache by platform_cpu_kill().
321	 */
322	(void)cpu_report_death();
323
324	/*
325	 * Ensure that the cache lines associated with that completion are
326	 * written out.  This covers the case where _this_ CPU is doing the
327	 * powering down, to ensure that the completion is visible to the
328	 * CPU waiting for this one.
329	 */
330	flush_cache_louis();
331
332	/*
333	 * The actual CPU shutdown procedure is at least platform (if not
334	 * CPU) specific.  This may remove power, or it may simply spin.
335	 *
336	 * Platforms are generally expected *NOT* to return from this call,
337	 * although there are some which do because they have no way to
338	 * power down the CPU.  These platforms are the _only_ reason we
339	 * have a return path which uses the fragment of assembly below.
340	 *
341	 * The return path should not be used for platforms which can
342	 * power off the CPU.
343	 */
344	if (smp_ops.cpu_die)
345		smp_ops.cpu_die(cpu);
346
347	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
348		cpu);
349
350	/*
351	 * Do not return to the idle loop - jump back to the secondary
352	 * cpu initialisation.  There's some initialisation which needs
353	 * to be repeated to undo the effects of taking the CPU offline.
354	 */
355	__asm__("mov	sp, %0\n"
356	"	mov	fp, #0\n"
357	"	b	secondary_start_kernel"
358		:
359		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
360}
361#endif /* CONFIG_HOTPLUG_CPU */
362
363/*
364 * Called by both boot and secondaries to move global data into
365 * per-processor storage.
366 */
367static void smp_store_cpu_info(unsigned int cpuid)
368{
369	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
370
371	cpu_info->loops_per_jiffy = loops_per_jiffy;
372	cpu_info->cpuid = read_cpuid_id();
373
374	store_cpu_topology(cpuid);
375	check_cpu_icache_size(cpuid);
376}
377
378/*
379 * This is the secondary CPU boot entry.  We're using this CPU's
380 * idle thread stack, but a set of temporary page tables.
381 */
382asmlinkage void secondary_start_kernel(void)
383{
384	struct mm_struct *mm = &init_mm;
385	unsigned int cpu;
386
387	secondary_biglittle_init();
388
389	/*
390	 * The identity mapping is uncached (strongly ordered), so
391	 * switch away from it before attempting any exclusive accesses.
392	 */
393	cpu_switch_mm(mm->pgd, mm);
394	local_flush_bp_all();
395	enter_lazy_tlb(mm, current);
396	local_flush_tlb_all();
397
398	/*
399	 * All kernel threads share the same mm context; grab a
400	 * reference and switch to it.
401	 */
402	cpu = smp_processor_id();
403	mmgrab(mm);
404	current->active_mm = mm;
405	cpumask_set_cpu(cpu, mm_cpumask(mm));
406
407	cpu_init();
408
409#ifndef CONFIG_MMU
410	setup_vectors_base();
411#endif
412	pr_debug("CPU%u: Booted secondary processor\n", cpu);
413
414	preempt_disable();
415	trace_hardirqs_off();
416
417	/*
418	 * Give the platform a chance to do its own initialisation.
419	 */
420	if (smp_ops.smp_secondary_init)
421		smp_ops.smp_secondary_init(cpu);
422
423	notify_cpu_starting(cpu);
424
425	calibrate_delay();
426
427	smp_store_cpu_info(cpu);
428
429	/*
430	 * OK, now it's safe to let the boot CPU continue.  Wait for
431	 * the CPU migration code to notice that the CPU is online
432	 * before we continue - which happens after __cpu_up returns.
433	 */
434	set_cpu_online(cpu, true);
435
436	check_other_bugs();
437
438	complete(&cpu_running);
439
440	local_irq_enable();
441	local_fiq_enable();
442	local_abt_enable();
443
444	/*
445	 * OK, it's off to the idle thread for us
446	 */
447	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
448}
449
450void __init smp_cpus_done(unsigned int max_cpus)
451{
452	int cpu;
453	unsigned long bogosum = 0;
454
455	for_each_online_cpu(cpu)
456		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
457
458	printk(KERN_INFO "SMP: Total of %d processors activated "
459	       "(%lu.%02lu BogoMIPS).\n",
460	       num_online_cpus(),
461	       bogosum / (500000/HZ),
462	       (bogosum / (5000/HZ)) % 100);
463
464	hyp_mode_check();
465}
466
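/* Record the boot CPU's per-CPU offset so this_cpu accessors resolve correctly. */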
467void __init smp_prepare_boot_cpu(void)
468{
469	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
470}
471
472void __init smp_prepare_cpus(unsigned int max_cpus)
473{
474	unsigned int ncores = num_possible_cpus();
475
476	init_cpu_topology();
477
478	smp_store_cpu_info(smp_processor_id());
479
480	/*
481	 * are we trying to boot more cores than exist?
482	 */
483	if (max_cpus > ncores)
484		max_cpus = ncores;
485	if (ncores > 1 && max_cpus) {
486		/*
487		 * Initialise the present map, which describes the set of CPUs
488		 * actually populated at the present time. A platform should
489		 * re-initialize the map in the platform's smp_prepare_cpus()
490		 * if present != possible (e.g. physical hotplug).
491		 */
492		init_cpu_present(cpu_possible_mask);
493
494		/*
495		 * Initialise the SCU if there is more than one CPU
496		 * and let them know where to start.
497		 */
498		if (smp_ops.smp_prepare_cpus)
499			smp_ops.smp_prepare_cpus(max_cpus);
500	}
501}
502
503static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
504
505void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
506{
507	if (!__smp_cross_call)
508		__smp_cross_call = fn;
509}
510
511static const char *ipi_types[NR_IPI] __tracepoint_string = {
512#define S(x,s)	[x] = s
513	S(IPI_WAKEUP, "CPU wakeup interrupts"),
514	S(IPI_TIMER, "Timer broadcast interrupts"),
515	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
516	S(IPI_CALL_FUNC, "Function call interrupts"),
517	S(IPI_CPU_STOP, "CPU stop interrupts"),
518	S(IPI_IRQ_WORK, "IRQ work interrupts"),
519	S(IPI_COMPLETION, "completion interrupts"),
520};
521
522static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
523{
524	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
525	__smp_cross_call(target, ipinr);
526}
527
528void show_ipi_list(struct seq_file *p, int prec)
529{
530	unsigned int cpu, i;
531
532	for (i = 0; i < NR_IPI; i++) {
533		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
534
535		for_each_online_cpu(cpu)
536			seq_printf(p, "%10u ",
537				   __get_irq_stat(cpu, ipi_irqs[i]));
538
539		seq_printf(p, " %s\n", ipi_types[i]);
540	}
541}
542
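/* Total IPIs handled on @cpu; folded into the generic IRQ accounting. */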
543u64 smp_irq_stat_cpu(unsigned int cpu)
544{
545	u64 sum = 0;
546	int i;
547
548	for (i = 0; i < NR_IPI; i++)
549		sum += __get_irq_stat(cpu, ipi_irqs[i]);
550
551	return sum;
552}
553
554void arch_send_call_function_ipi_mask(const struct cpumask *mask)
555{
556	smp_cross_call(mask, IPI_CALL_FUNC);
557}
558
559void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
560{
561	smp_cross_call(mask, IPI_WAKEUP);
562}
563
564void arch_send_call_function_single_ipi(int cpu)
565{
566	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
567}
568
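/*
 * irq_work: raise a self-IPI if the interrupt controller can deliver one;
 * otherwise the pending work is run from the next timer tick instead.
 */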
569#ifdef CONFIG_IRQ_WORK
570void arch_irq_work_raise(void)
571{
572	if (arch_irq_work_has_interrupt())
573		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
574}
575#endif
576
577#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
578void tick_broadcast(const struct cpumask *mask)
579{
580	smp_cross_call(mask, IPI_TIMER);
581}
582#endif
583
584static DEFINE_RAW_SPINLOCK(stop_lock);
585
586/*
587 * ipi_cpu_stop - handle IPI from smp_send_stop()
588 */
589static void ipi_cpu_stop(unsigned int cpu)
590{
591	if (system_state <= SYSTEM_RUNNING) {
592		raw_spin_lock(&stop_lock);
593		pr_crit("CPU%u: stopping\n", cpu);
594		dump_stack();
595		raw_spin_unlock(&stop_lock);
596	}
597
598	set_cpu_online(cpu, false);
599
600	local_fiq_disable();
601	local_irq_disable();
602
603	while (1) {
604		cpu_relax();
605		wfe();
606	}
607}
608
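/*
 * IPI_COMPLETION support: a caller registers a completion against a target
 * CPU and arranges for that CPU to receive the returned IPI number; the
 * target's handler then completes it via ipi_complete().
 */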
609static DEFINE_PER_CPU(struct completion *, cpu_completion);
610
611int register_ipi_completion(struct completion *completion, int cpu)
612{
613	per_cpu(cpu_completion, cpu) = completion;
614	return IPI_COMPLETION;
615}
616
617static void ipi_complete(unsigned int cpu)
618{
619	complete(per_cpu(cpu_completion, cpu));
620}
621
622/*
623 * Main handler for inter-processor interrupts
624 */
625asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
626{
627	handle_IPI(ipinr, regs);
628}
629
630void handle_IPI(int ipinr, struct pt_regs *regs)
631{
632	unsigned int cpu = smp_processor_id();
633	struct pt_regs *old_regs = set_irq_regs(regs);
634
635	if ((unsigned)ipinr < NR_IPI) {
636		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
637		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
638	}
639
640	switch (ipinr) {
641	case IPI_WAKEUP:
642		break;
643
644#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
645	case IPI_TIMER:
646		irq_enter();
647		tick_receive_broadcast();
648		irq_exit();
649		break;
650#endif
651
652	case IPI_RESCHEDULE:
653		scheduler_ipi();
654		break;
655
656	case IPI_CALL_FUNC:
657		irq_enter();
658		generic_smp_call_function_interrupt();
659		irq_exit();
660		break;
661
662	case IPI_CPU_STOP:
663		irq_enter();
664		ipi_cpu_stop(cpu);
665		irq_exit();
666		break;
667
668#ifdef CONFIG_IRQ_WORK
669	case IPI_IRQ_WORK:
670		irq_enter();
671		irq_work_run();
672		irq_exit();
673		break;
674#endif
675
676	case IPI_COMPLETION:
677		irq_enter();
678		ipi_complete(cpu);
679		irq_exit();
680		break;
681
682	case IPI_CPU_BACKTRACE:
683		printk_nmi_enter();
684		irq_enter();
685		nmi_cpu_backtrace(regs);
686		irq_exit();
687		printk_nmi_exit();
688		break;
689
690	default:
691		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
692		        cpu, ipinr);
693		break;
694	}
695
696	if ((unsigned)ipinr < NR_IPI)
697		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
698	set_irq_regs(old_regs);
699}
700
701void smp_send_reschedule(int cpu)
702{
703	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
704}
705
706void smp_send_stop(void)
707{
708	unsigned long timeout;
709	struct cpumask mask;
710
711	cpumask_copy(&mask, cpu_online_mask);
712	cpumask_clear_cpu(smp_processor_id(), &mask);
713	if (!cpumask_empty(&mask))
714		smp_cross_call(&mask, IPI_CPU_STOP);
715
716	/* Wait up to one second for other CPUs to stop */
717	timeout = USEC_PER_SEC;
718	while (num_online_cpus() > 1 && timeout--)
719		udelay(1);
720
721	if (num_online_cpus() > 1)
722		pr_warn("SMP: failed to stop secondary CPUs\n");
723}
724
725/* If panic() is called at the same time on CPU1 and CPU2, and CPU1 calls
726 * panic_smp_self_stop() before CPU2 gets to crash_smp_send_stop(), CPU1
727 * can no longer receive CPU2's IPIs, stays marked online forever, and
728 * kdump fails.  So provide our own panic_smp_self_stop() which also does
729 * set_cpu_online(smp_processor_id(), false).
730 */
731void panic_smp_self_stop(void)
732{
733	pr_debug("CPU %u will stop doing anything useful since another CPU has panicked\n",
734	         smp_processor_id());
735	set_cpu_online(smp_processor_id(), false);
736	while (1)
737		cpu_relax();
738}
739
740/*
741 * not supported here
742 */
743int setup_profiling_timer(unsigned int multiplier)
744{
745	return -EINVAL;
746}
747
748#ifdef CONFIG_CPU_FREQ
749
750static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
751static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
752static unsigned long global_l_p_j_ref;
753static unsigned long global_l_p_j_ref_freq;
754
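/*
 * loops_per_jiffy is calibrated against the CPU clock, so rescale the global
 * and per-CPU copies around frequency transitions to keep udelay() roughly
 * accurate, unless the platform's delay loop is frequency-invariant
 * (CPUFREQ_CONST_LOOPS).
 */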
755static int cpufreq_callback(struct notifier_block *nb,
756					unsigned long val, void *data)
757{
758	struct cpufreq_freqs *freq = data;
759	struct cpumask *cpus = freq->policy->cpus;
760	int cpu, first = cpumask_first(cpus);
761	unsigned int lpj;
762
763	if (freq->flags & CPUFREQ_CONST_LOOPS)
764		return NOTIFY_OK;
765
766	if (!per_cpu(l_p_j_ref, first)) {
767		for_each_cpu(cpu, cpus) {
768			per_cpu(l_p_j_ref, cpu) =
769				per_cpu(cpu_data, cpu).loops_per_jiffy;
770			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
771		}
772
773		if (!global_l_p_j_ref) {
774			global_l_p_j_ref = loops_per_jiffy;
775			global_l_p_j_ref_freq = freq->old;
776		}
777	}
778
779	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
780	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
781		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
782						global_l_p_j_ref_freq,
783						freq->new);
784
785		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
786				    per_cpu(l_p_j_ref_freq, first), freq->new);
787		for_each_cpu(cpu, cpus)
788			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
789	}
790	return NOTIFY_OK;
791}
792
793static struct notifier_block cpufreq_notifier = {
794	.notifier_call  = cpufreq_callback,
795};
796
797static int __init register_cpufreq_notifier(void)
798{
799	return cpufreq_register_notifier(&cpufreq_notifier,
800						CPUFREQ_TRANSITION_NOTIFIER);
801}
802core_initcall(register_cpufreq_notifier);
803
804#endif
805
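/*
 * Backtrace IPIs (e.g. from sysrq-l): raised through __smp_cross_call()
 * directly so that, as noted in the enum above, IPI_CPU_BACKTRACE bypasses
 * the trace_ipi_* tracepoints and NR_IPI accounting.
 */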
806static void raise_nmi(cpumask_t *mask)
807{
808	__smp_cross_call(mask, IPI_CPU_BACKTRACE);
809}
810
811void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
812{
813	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
814}
v3.5.6
 
  1/*
  2 *  linux/arch/arm/kernel/smp.c
  3 *
  4 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License version 2 as
  8 * published by the Free Software Foundation.
  9 */
 10#include <linux/module.h>
 11#include <linux/delay.h>
 12#include <linux/init.h>
 13#include <linux/spinlock.h>
 14#include <linux/sched.h>
 15#include <linux/interrupt.h>
 16#include <linux/cache.h>
 17#include <linux/profile.h>
 18#include <linux/errno.h>
 19#include <linux/mm.h>
 20#include <linux/err.h>
 21#include <linux/cpu.h>
 22#include <linux/smp.h>
 23#include <linux/seq_file.h>
 24#include <linux/irq.h>
 25#include <linux/percpu.h>
 26#include <linux/clockchips.h>
 27#include <linux/completion.h>
 28
 29#include <linux/atomic.h>
 30#include <asm/cacheflush.h>
 31#include <asm/cpu.h>
 32#include <asm/cputype.h>
 33#include <asm/exception.h>
 34#include <asm/idmap.h>
 35#include <asm/topology.h>
 36#include <asm/mmu_context.h>
 37#include <asm/pgtable.h>
 38#include <asm/pgalloc.h>
 39#include <asm/processor.h>
 40#include <asm/sections.h>
 41#include <asm/tlbflush.h>
 42#include <asm/ptrace.h>
 43#include <asm/localtimer.h>
 44#include <asm/smp_plat.h>
 45
 46/*
 47 * as from 2.5, kernels no longer have an init_tasks structure
 48 * so we need some other way of telling a new secondary core
 49 * where to place its SVC stack
 50 */
 51struct secondary_data secondary_data;
 52
 53enum ipi_msg_type {
 54	IPI_TIMER = 2,
 55	IPI_RESCHEDULE,
 56	IPI_CALL_FUNC,
 57	IPI_CALL_FUNC_SINGLE,
 58	IPI_CPU_STOP,
 59};
 60
 61static DECLARE_COMPLETION(cpu_running);
 62
 63int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 64{
 65	int ret;
 66
 67	/*
 68	 * We need to tell the secondary core where to find
 69	 * its stack and the page tables.
 70	 */
 71	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
 72	secondary_data.pgdir = virt_to_phys(idmap_pgd);
 73	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
 74	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
 75	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
 76
 77	/*
 78	 * Now bring the CPU into our world.
 79	 */
 80	ret = boot_secondary(cpu, idle);
 81	if (ret == 0) {
 82		/*
 83		 * CPU was successfully started, wait for it
 84		 * to come online or time out.
 85		 */
 86		wait_for_completion_timeout(&cpu_running,
 87						 msecs_to_jiffies(1000));
 88
 89		if (!cpu_online(cpu)) {
 90			pr_crit("CPU%u: failed to come online\n", cpu);
 91			ret = -EIO;
 92		}
 93	} else {
 94		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 95	}
 96
 97	secondary_data.stack = NULL;
 98	secondary_data.pgdir = 0;
 99
100	return ret;
101}
102
103#ifdef CONFIG_HOTPLUG_CPU
104static void percpu_timer_stop(void);
105
106/*
107 * __cpu_disable runs on the processor to be shut down.
108 */
109int __cpu_disable(void)
110{
111	unsigned int cpu = smp_processor_id();
112	int ret;
113
114	ret = platform_cpu_disable(cpu);
115	if (ret)
116		return ret;
117
118	/*
119	 * Take this CPU offline.  Once we clear this, we can't return,
120	 * and we must not schedule until we're ready to give up the cpu.
121	 */
122	set_cpu_online(cpu, false);
123
124	/*
125	 * OK - migrate IRQs away from this CPU
126	 */
127	migrate_irqs();
128
129	/*
130	 * Stop the local timer for this CPU.
131	 */
132	percpu_timer_stop();
133
134	/*
135	 * Flush user cache and TLB mappings, and then remove this CPU
136	 * from the vm mask set of all processes.
137	 */
138	flush_cache_all();
139	local_flush_tlb_all();
140
141	clear_tasks_mm_cpumask(cpu);
142
143	return 0;
144}
145
146static DECLARE_COMPLETION(cpu_died);
147
148/*
149 * called on the thread which is asking for a CPU to be shut down -
150 * waits until shutdown has completed, or until it times out.
151 */
152void __cpu_die(unsigned int cpu)
153{
154	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
155		pr_err("CPU%u: cpu didn't die\n", cpu);
156		return;
157	}
158	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
159
160	if (!platform_cpu_kill(cpu))
161		printk("CPU%u: unable to kill\n", cpu);
162}
163
164/*
165 * Called from the idle thread for the CPU which has been shutdown.
166 *
167 * Note that we disable IRQs here, but do not re-enable them
168 * before returning to the caller. This is also the behaviour
169 * of the other hotplug-cpu capable cores, so presumably coming
170 * out of idle fixes this.
171 */
172void __ref cpu_die(void)
173{
174	unsigned int cpu = smp_processor_id();
175
176	idle_task_exit();
177
178	local_irq_disable();
179	mb();
180
181	/* Tell __cpu_die() that this CPU is now safe to dispose of */
182	complete(&cpu_died);
183
184	/*
185	 * actual CPU shutdown procedure is at least platform (if not
186	 * CPU) specific.
187	 */
188	platform_cpu_die(cpu);
189
190	/*
191	 * Do not return to the idle loop - jump back to the secondary
192	 * cpu initialisation.  There's some initialisation which needs
193	 * to be repeated to undo the effects of taking the CPU offline.
194	 */
195	__asm__("mov	sp, %0\n"
196	"	mov	fp, #0\n"
197	"	b	secondary_start_kernel"
198		:
199		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
200}
201#endif /* CONFIG_HOTPLUG_CPU */
202
203/*
204 * Called by both boot and secondaries to move global data into
205 * per-processor storage.
206 */
207static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
208{
209	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
210
211	cpu_info->loops_per_jiffy = loops_per_jiffy;
212
213	store_cpu_topology(cpuid);
214}
215
216static void percpu_timer_setup(void);
217
218/*
219 * This is the secondary CPU boot entry.  We're using this CPU's
220 * idle thread stack, but a set of temporary page tables.
221 */
222asmlinkage void __cpuinit secondary_start_kernel(void)
223{
224	struct mm_struct *mm = &init_mm;
225	unsigned int cpu = smp_processor_id();
226
227	/*
228	 * All kernel threads share the same mm context; grab a
229	 * reference and switch to it.
230	 */
231	atomic_inc(&mm->mm_count);
232	current->active_mm = mm;
233	cpumask_set_cpu(cpu, mm_cpumask(mm));
234	cpu_switch_mm(mm->pgd, mm);
235	enter_lazy_tlb(mm, current);
236	local_flush_tlb_all();
237
238	printk("CPU%u: Booted secondary processor\n", cpu);
239
240	cpu_init();
241	preempt_disable();
242	trace_hardirqs_off();
243
244	/*
245	 * Give the platform a chance to do its own initialisation.
246	 */
247	platform_secondary_init(cpu);
248
249	notify_cpu_starting(cpu);
250
251	calibrate_delay();
252
253	smp_store_cpu_info(cpu);
254
255	/*
256	 * OK, now it's safe to let the boot CPU continue.  Wait for
257	 * the CPU migration code to notice that the CPU is online
258	 * before we continue - which happens after __cpu_up returns.
259	 */
260	set_cpu_online(cpu, true);
261	complete(&cpu_running);
262
263	/*
264	 * Setup the percpu timer for this CPU.
265	 */
266	percpu_timer_setup();
267
268	local_irq_enable();
269	local_fiq_enable();
270
271	/*
272	 * OK, it's off to the idle thread for us
273	 */
274	cpu_idle();
275}
276
277void __init smp_cpus_done(unsigned int max_cpus)
278{
279	int cpu;
280	unsigned long bogosum = 0;
281
282	for_each_online_cpu(cpu)
283		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
284
285	printk(KERN_INFO "SMP: Total of %d processors activated "
286	       "(%lu.%02lu BogoMIPS).\n",
287	       num_online_cpus(),
288	       bogosum / (500000/HZ),
289	       (bogosum / (5000/HZ)) % 100);
290}
291
292void __init smp_prepare_boot_cpu(void)
293{
294}
295
296void __init smp_prepare_cpus(unsigned int max_cpus)
297{
298	unsigned int ncores = num_possible_cpus();
299
300	init_cpu_topology();
301
302	smp_store_cpu_info(smp_processor_id());
303
304	/*
305	 * are we trying to boot more cores than exist?
306	 */
307	if (max_cpus > ncores)
308		max_cpus = ncores;
309	if (ncores > 1 && max_cpus) {
310		/*
311		 * Enable the local timer or broadcast device for the
312		 * boot CPU, but only if we have more than one CPU.
313		 */
314		percpu_timer_setup();
315
316		/*
317		 * Initialise the present map, which describes the set of CPUs
318		 * actually populated at the present time. A platform should
319		 * re-initialize the map in platform_smp_prepare_cpus() if
320		 * present != possible (e.g. physical hotplug).
321		 */
322		init_cpu_present(cpu_possible_mask);
323
324		/*
325		 * Initialise the SCU if there is more than one CPU
326		 * and let them know where to start.
327		 */
328		platform_smp_prepare_cpus(max_cpus);
329	}
330}
331
332static void (*smp_cross_call)(const struct cpumask *, unsigned int);
333
334void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
335{
336	smp_cross_call = fn;
337}
338
339void arch_send_call_function_ipi_mask(const struct cpumask *mask)
340{
341	smp_cross_call(mask, IPI_CALL_FUNC);
342}
343
344void arch_send_call_function_single_ipi(int cpu)
345{
346	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
347}
348
349static const char *ipi_types[NR_IPI] = {
350#define S(x,s)	[x - IPI_TIMER] = s
351	S(IPI_TIMER, "Timer broadcast interrupts"),
352	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
353	S(IPI_CALL_FUNC, "Function call interrupts"),
354	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
355	S(IPI_CPU_STOP, "CPU stop interrupts"),
356};
357
358void show_ipi_list(struct seq_file *p, int prec)
359{
360	unsigned int cpu, i;
361
362	for (i = 0; i < NR_IPI; i++) {
363		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
364
365		for_each_present_cpu(cpu)
366			seq_printf(p, "%10u ",
367				   __get_irq_stat(cpu, ipi_irqs[i]));
368
369		seq_printf(p, " %s\n", ipi_types[i]);
370	}
371}
372
373u64 smp_irq_stat_cpu(unsigned int cpu)
374{
375	u64 sum = 0;
376	int i;
377
378	for (i = 0; i < NR_IPI; i++)
379		sum += __get_irq_stat(cpu, ipi_irqs[i]);
380
381	return sum;
382}
383
384/*
385 * Timer (local or broadcast) support
386 */
387static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
388
389static void ipi_timer(void)
390{
391	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
392	evt->event_handler(evt);
393}
394
395#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
396static void smp_timer_broadcast(const struct cpumask *mask)
397{
398	smp_cross_call(mask, IPI_TIMER);
399}
400#else
401#define smp_timer_broadcast	NULL
402#endif
403
404static void broadcast_timer_set_mode(enum clock_event_mode mode,
405	struct clock_event_device *evt)
406{
407}
408
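/*
 * Fallback when no local timer is available: register a dummy clockevent so
 * the generic code drives this CPU's tick via the broadcast mechanism
 * (IPI_TIMER) instead of real per-CPU hardware.
 */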
409static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
410{
411	evt->name	= "dummy_timer";
412	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
413			  CLOCK_EVT_FEAT_PERIODIC |
414			  CLOCK_EVT_FEAT_DUMMY;
415	evt->rating	= 400;
416	evt->mult	= 1;
417	evt->set_mode	= broadcast_timer_set_mode;
418
419	clockevents_register_device(evt);
420}
421
422static struct local_timer_ops *lt_ops;
423
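/*
 * Platforms with a real per-CPU timer register their ops here; if none is
 * registered (or its setup fails), percpu_timer_setup() falls back to the
 * dummy broadcast clockevent above.
 */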
424#ifdef CONFIG_LOCAL_TIMERS
425int local_timer_register(struct local_timer_ops *ops)
426{
427	if (!is_smp() || !setup_max_cpus)
428		return -ENXIO;
429
430	if (lt_ops)
431		return -EBUSY;
432
433	lt_ops = ops;
434	return 0;
435}
436#endif
437
438static void __cpuinit percpu_timer_setup(void)
439{
440	unsigned int cpu = smp_processor_id();
441	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
442
443	evt->cpumask = cpumask_of(cpu);
444	evt->broadcast = smp_timer_broadcast;
445
446	if (!lt_ops || lt_ops->setup(evt))
447		broadcast_timer_setup(evt);
448}
449
450#ifdef CONFIG_HOTPLUG_CPU
451/*
452 * The generic clock events code purposely does not stop the local timer
453 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
454 * manually here.
455 */
456static void percpu_timer_stop(void)
457{
458	unsigned int cpu = smp_processor_id();
459	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
460
461	if (lt_ops)
462		lt_ops->stop(evt);
463}
464#endif
465
466static DEFINE_RAW_SPINLOCK(stop_lock);
467
468/*
469 * ipi_cpu_stop - handle IPI from smp_send_stop()
470 */
471static void ipi_cpu_stop(unsigned int cpu)
472{
473	if (system_state == SYSTEM_BOOTING ||
474	    system_state == SYSTEM_RUNNING) {
475		raw_spin_lock(&stop_lock);
476		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
477		dump_stack();
478		raw_spin_unlock(&stop_lock);
479	}
480
481	set_cpu_online(cpu, false);
482
483	local_fiq_disable();
484	local_irq_disable();
485
486	while (1)
487		cpu_relax();
488}
489
490/*
491 * Main handler for inter-processor interrupts
492 */
493asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
494{
495	handle_IPI(ipinr, regs);
496}
497
498void handle_IPI(int ipinr, struct pt_regs *regs)
499{
500	unsigned int cpu = smp_processor_id();
501	struct pt_regs *old_regs = set_irq_regs(regs);
502
503	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
504		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);
505
506	switch (ipinr) {
507	case IPI_TIMER:
508		irq_enter();
509		ipi_timer();
510		irq_exit();
511		break;
512
513	case IPI_RESCHEDULE:
514		scheduler_ipi();
515		break;
516
517	case IPI_CALL_FUNC:
518		irq_enter();
519		generic_smp_call_function_interrupt();
520		irq_exit();
521		break;
522
523	case IPI_CALL_FUNC_SINGLE:
524		irq_enter();
525		generic_smp_call_function_single_interrupt();
526		irq_exit();
527		break;
528
529	case IPI_CPU_STOP:
530		irq_enter();
531		ipi_cpu_stop(cpu);
532		irq_exit();
533		break;
534
535	default:
536		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
537		       cpu, ipinr);
538		break;
539	}
540	set_irq_regs(old_regs);
541}
542
543void smp_send_reschedule(int cpu)
544{
545	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
546}
547
548#ifdef CONFIG_HOTPLUG_CPU
549static void smp_kill_cpus(cpumask_t *mask)
550{
551	unsigned int cpu;
552	for_each_cpu(cpu, mask)
553		platform_cpu_kill(cpu);
554}
555#else
556static void smp_kill_cpus(cpumask_t *mask) { }
557#endif
558
559void smp_send_stop(void)
560{
561	unsigned long timeout;
562	struct cpumask mask;
563
564	cpumask_copy(&mask, cpu_online_mask);
565	cpumask_clear_cpu(smp_processor_id(), &mask);
566	if (!cpumask_empty(&mask))
567		smp_cross_call(&mask, IPI_CPU_STOP);
568
569	/* Wait up to one second for other CPUs to stop */
570	timeout = USEC_PER_SEC;
571	while (num_online_cpus() > 1 && timeout--)
572		udelay(1);
573
574	if (num_online_cpus() > 1)
575		pr_warning("SMP: failed to stop secondary CPUs\n");
576
577	smp_kill_cpus(&mask);
578}
579
580/*
581 * not supported here
582 */
583int setup_profiling_timer(unsigned int multiplier)
584{
585	return -EINVAL;
586}