v4.6
 
  1/*
  2 * SMP initialisation and IPI support
  3 * Based on arch/arm/kernel/smp.c
  4 *
  5 * Copyright (C) 2012 ARM Ltd.
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope that it will be useful,
 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 * GNU General Public License for more details.
 15 *
 16 * You should have received a copy of the GNU General Public License
 17 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 18 */
 19
 20#include <linux/acpi.h>
 21#include <linux/delay.h>
 22#include <linux/init.h>
 23#include <linux/spinlock.h>
 24#include <linux/sched.h>
 25#include <linux/interrupt.h>
 26#include <linux/cache.h>
 27#include <linux/profile.h>
 28#include <linux/errno.h>
 29#include <linux/mm.h>
 30#include <linux/err.h>
 31#include <linux/cpu.h>
 32#include <linux/smp.h>
 33#include <linux/seq_file.h>
 34#include <linux/irq.h>
 35#include <linux/percpu.h>
 36#include <linux/clockchips.h>
 37#include <linux/completion.h>
 38#include <linux/of.h>
 39#include <linux/irq_work.h>
 40
 41#include <asm/alternative.h>
 42#include <asm/atomic.h>
 43#include <asm/cacheflush.h>
 44#include <asm/cpu.h>
 45#include <asm/cputype.h>
 46#include <asm/cpu_ops.h>
 47#include <asm/mmu_context.h>
 48#include <asm/pgtable.h>
 49#include <asm/pgalloc.h>
 50#include <asm/processor.h>
 51#include <asm/smp_plat.h>
 52#include <asm/sections.h>
 53#include <asm/tlbflush.h>
 54#include <asm/ptrace.h>
 55#include <asm/virt.h>
 56
 57#define CREATE_TRACE_POINTS
 58#include <trace/events/ipi.h>
 59
 60/*
 61 * as from 2.5, kernels no longer have an init_tasks structure
 62 * so we need some other way of telling a new secondary core
 63 * where to place its SVC stack
 64 */
 65struct secondary_data secondary_data;
 66/* Number of CPUs which aren't online, but looping in kernel text. */
 67int cpus_stuck_in_kernel;
 68
 69enum ipi_msg_type {
 70	IPI_RESCHEDULE,
 71	IPI_CALL_FUNC,
 72	IPI_CPU_STOP,
 73	IPI_TIMER,
 74	IPI_IRQ_WORK,
 75	IPI_WAKEUP
 76};
 77
 78#ifdef CONFIG_HOTPLUG_CPU
 79static int op_cpu_kill(unsigned int cpu);
 80#else
 81static inline int op_cpu_kill(unsigned int cpu)
 82{
 83	return -ENOSYS;
 84}
 85#endif
 86
 87
 88/*
 89 * Boot a secondary CPU, and assign it the specified idle task.
 90 * This also gives us the initial stack to use for this CPU.
 91 */
 92static int boot_secondary(unsigned int cpu, struct task_struct *idle)
 93{
 94	if (cpu_ops[cpu]->cpu_boot)
 95		return cpu_ops[cpu]->cpu_boot(cpu);
 96
 97	return -EOPNOTSUPP;
 98}
 99
100static DECLARE_COMPLETION(cpu_running);
101
102int __cpu_up(unsigned int cpu, struct task_struct *idle)
103{
104	int ret;
105	long status;
106
107	/*
108	 * We need to tell the secondary core where to find its stack and the
109	 * page tables.
110	 */
111	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
112	update_cpu_boot_status(CPU_MMU_OFF);
113	__flush_dcache_area(&secondary_data, sizeof(secondary_data));
114
115	/*
116	 * Now bring the CPU into our world.
117	 */
118	ret = boot_secondary(cpu, idle);
119	if (ret == 0) {
120		/*
121		 * CPU was successfully started, wait for it to come online or
122		 * time out.
123		 */
124		wait_for_completion_timeout(&cpu_running,
125					    msecs_to_jiffies(1000));
126
127		if (!cpu_online(cpu)) {
128			pr_crit("CPU%u: failed to come online\n", cpu);
129			ret = -EIO;
130		}
131	} else {
132		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
133	}
134
135	secondary_data.stack = NULL;
136	status = READ_ONCE(secondary_data.status);
137	if (ret && status) {
138
139		if (status == CPU_MMU_OFF)
140			status = READ_ONCE(__early_cpu_boot_status);
141
142		switch (status) {
143		default:
144			pr_err("CPU%u: failed in unknown state : 0x%lx\n",
145					cpu, status);
146			break;
147		case CPU_KILL_ME:
148			if (!op_cpu_kill(cpu)) {
149				pr_crit("CPU%u: died during early boot\n", cpu);
150				break;
151			}
152			/* Fall through */
153			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
154		case CPU_STUCK_IN_KERNEL:
155			pr_crit("CPU%u: is stuck in kernel\n", cpu);
156			cpus_stuck_in_kernel++;
157			break;
158		case CPU_PANIC_KERNEL:
159			panic("CPU%u detected unsupported configuration\n", cpu);
160		}
161	}
162
163	return ret;
164}
165
166static void smp_store_cpu_info(unsigned int cpuid)
167{
168	store_cpu_topology(cpuid);
169}
170
171/*
172 * This is the secondary CPU boot entry.  We're using this CPU's
173 * idle thread stack, but a set of temporary page tables.
174 */
175asmlinkage void secondary_start_kernel(void)
176{
177	struct mm_struct *mm = &init_mm;
178	unsigned int cpu = smp_processor_id();
179
180	/*
181	 * All kernel threads share the same mm context; grab a
182	 * reference and switch to it.
183	 */
184	atomic_inc(&mm->mm_count);
185	current->active_mm = mm;
186
187	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
188
189	/*
190	 * TTBR0 is only used for the identity mapping at this stage. Make it
191	 * point to zero page to avoid speculatively fetching new entries.
192	 */
193	cpu_uninstall_idmap();
194
195	preempt_disable();
196	trace_hardirqs_off();
197
198	/*
199	 * If the system has established the capabilities, make sure
200	 * this CPU ticks all of those. If it doesn't, the CPU will
201	 * fail to come online.
202	 */
203	verify_local_cpu_capabilities();
204
205	if (cpu_ops[cpu]->cpu_postboot)
206		cpu_ops[cpu]->cpu_postboot();
207
208	/*
209	 * Log the CPU info before it is marked online and might get read.
210	 */
211	cpuinfo_store_cpu();
212
213	/*
214	 * Enable GIC and timers.
215	 */
216	notify_cpu_starting(cpu);
217
218	smp_store_cpu_info(cpu);
219
220	/*
221	 * OK, now it's safe to let the boot CPU continue.  Wait for
222	 * the CPU migration code to notice that the CPU is online
223	 * before we continue.
224	 */
225	pr_info("CPU%u: Booted secondary processor [%08x]\n",
226					 cpu, read_cpuid_id());
227	update_cpu_boot_status(CPU_BOOT_SUCCESS);
228	/* Make sure the status update is visible before we complete */
229	smp_wmb();
230	set_cpu_online(cpu, true);
231	complete(&cpu_running);
232
233	local_dbg_enable();
234	local_irq_enable();
235	local_async_enable();
236
237	/*
238	 * OK, it's off to the idle thread for us
239	 */
240	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
241}
242
243#ifdef CONFIG_HOTPLUG_CPU
244static int op_cpu_disable(unsigned int cpu)
245{
246	/*
247	 * If we don't have a cpu_die method, abort before we reach the point
248	 * of no return. CPU0 may not have a cpu_ops, so test for it.
249	 */
250	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
251		return -EOPNOTSUPP;
252
253	/*
254	 * We may need to abort a hot unplug for some other mechanism-specific
255	 * reason.
256	 */
257	if (cpu_ops[cpu]->cpu_disable)
258		return cpu_ops[cpu]->cpu_disable(cpu);
259
260	return 0;
261}
262
263/*
264 * __cpu_disable runs on the processor to be shutdown.
265 */
266int __cpu_disable(void)
267{
268	unsigned int cpu = smp_processor_id();
269	int ret;
270
271	ret = op_cpu_disable(cpu);
272	if (ret)
273		return ret;
274
275	/*
276	 * Take this CPU offline.  Once we clear this, we can't return,
277	 * and we must not schedule until we're ready to give up the cpu.
278	 */
279	set_cpu_online(cpu, false);
280
281	/*
282	 * OK - migrate IRQs away from this CPU
283	 */
284	irq_migrate_all_off_this_cpu();
285
286	return 0;
287}
288
289static int op_cpu_kill(unsigned int cpu)
290{
291	/*
292	 * If we have no means of synchronising with the dying CPU, then assume
293	 * that it is really dead. We can only wait for an arbitrary length of
294	 * time and hope that it's dead, so let's skip the wait and just hope.
295	 */
296	if (!cpu_ops[cpu]->cpu_kill)
297		return 0;
298
299	return cpu_ops[cpu]->cpu_kill(cpu);
300}
301
302/*
303 * called on the thread which is asking for a CPU to be shutdown -
304 * waits until shutdown has completed, or it is timed out.
305 */
306void __cpu_die(unsigned int cpu)
307{
308	int err;
309
310	if (!cpu_wait_death(cpu, 5)) {
311		pr_crit("CPU%u: cpu didn't die\n", cpu);
312		return;
313	}
314	pr_notice("CPU%u: shutdown\n", cpu);
315
316	/*
317	 * Now that the dying CPU is beyond the point of no return w.r.t.
318	 * in-kernel synchronisation, try to get the firmware to help us to
319	 * verify that it has really left the kernel before we consider
320	 * clobbering anything it might still be using.
321	 */
322	err = op_cpu_kill(cpu);
323	if (err)
324		pr_warn("CPU%d may not have shut down cleanly: %d\n",
325			cpu, err);
326}
327
328/*
329 * Called from the idle thread for the CPU which has been shutdown.
330 *
331 * Note that we disable IRQs here, but do not re-enable them
332 * before returning to the caller. This is also the behaviour
333 * of the other hotplug-cpu capable cores, so presumably coming
334 * out of idle fixes this.
335 */
336void cpu_die(void)
337{
338	unsigned int cpu = smp_processor_id();
339
340	idle_task_exit();
341
342	local_irq_disable();
343
344	/* Tell __cpu_die() that this CPU is now safe to dispose of */
345	(void)cpu_report_death();
346
347	/*
348	 * Actually shutdown the CPU. This must never fail. The specific hotplug
349	 * mechanism must perform all required cache maintenance to ensure that
350	 * no dirty lines are lost in the process of shutting down the CPU.
351	 */
352	cpu_ops[cpu]->cpu_die(cpu);
353
354	BUG();
355}
356#endif
357
358/*
359 * Kill the calling secondary CPU, early in bringup before it is turned
360 * online.
361 */
362void cpu_die_early(void)
363{
364	int cpu = smp_processor_id();
365
366	pr_crit("CPU%d: will not boot\n", cpu);
367
368	/* Mark this CPU absent */
369	set_cpu_present(cpu, 0);
370
371#ifdef CONFIG_HOTPLUG_CPU
372	update_cpu_boot_status(CPU_KILL_ME);
373	/* Check if we can park ourselves */
374	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
375		cpu_ops[cpu]->cpu_die(cpu);
376#endif
377	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);
378
379	cpu_park_loop();
380}
381
382static void __init hyp_mode_check(void)
383{
384	if (is_hyp_mode_available())
385		pr_info("CPU: All CPU(s) started at EL2\n");
386	else if (is_hyp_mode_mismatched())
387		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
388			   "CPU: CPUs started in inconsistent modes");
389	else
390		pr_info("CPU: All CPU(s) started at EL1\n");
391}
392
393void __init smp_cpus_done(unsigned int max_cpus)
394{
395	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
396	setup_cpu_features();
397	hyp_mode_check();
398	apply_alternatives_all();
399}
400
401void __init smp_prepare_boot_cpu(void)
402{
403	cpuinfo_store_boot_cpu();
404	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
405}
406
407static u64 __init of_get_cpu_mpidr(struct device_node *dn)
408{
409	const __be32 *cell;
410	u64 hwid;
411
412	/*
413	 * A cpu node with missing "reg" property is
414	 * considered invalid to build a cpu_logical_map
415	 * entry.
416	 */
417	cell = of_get_property(dn, "reg", NULL);
418	if (!cell) {
419		pr_err("%s: missing reg property\n", dn->full_name);
420		return INVALID_HWID;
421	}
422
423	hwid = of_read_number(cell, of_n_addr_cells(dn));
424	/*
425	 * Non affinity bits must be set to 0 in the DT
426	 */
427	if (hwid & ~MPIDR_HWID_BITMASK) {
428		pr_err("%s: invalid reg property\n", dn->full_name);
429		return INVALID_HWID;
430	}
431	return hwid;
432}
433
434/*
435 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
436 * entries and check for duplicates. If any is found just ignore the
437 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
438 * matching valid MPIDR values.
439 */
440static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
441{
442	unsigned int i;
443
444	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
445		if (cpu_logical_map(i) == hwid)
446			return true;
447	return false;
448}
449
450/*
451 * Initialize cpu operations for a logical cpu and
452 * set it in the possible mask on success
453 */
454static int __init smp_cpu_setup(int cpu)
455{
456	if (cpu_read_ops(cpu))
457		return -ENODEV;
458
459	if (cpu_ops[cpu]->cpu_init(cpu))
460		return -ENODEV;
461
462	set_cpu_possible(cpu, true);
463
464	return 0;
465}
466
467static bool bootcpu_valid __initdata;
468static unsigned int cpu_count = 1;
469
470#ifdef CONFIG_ACPI
471/*
472 * acpi_map_gic_cpu_interface - parse processor MADT entry
473 *
474 * Carry out sanity checks on MADT processor entry and initialize
475 * cpu_logical_map on success
476 */
477static void __init
478acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
479{
480	u64 hwid = processor->arm_mpidr;
481
482	if (!(processor->flags & ACPI_MADT_ENABLED)) {
483		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
484		return;
485	}
486
487	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
488		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
489		return;
490	}
491
492	if (is_mpidr_duplicate(cpu_count, hwid)) {
493		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
494		return;
495	}
496
497	/* Check if GICC structure of boot CPU is available in the MADT */
498	if (cpu_logical_map(0) == hwid) {
499		if (bootcpu_valid) {
500			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
501			       hwid);
502			return;
503		}
504		bootcpu_valid = true;
505		return;
506	}
507
508	if (cpu_count >= NR_CPUS)
509		return;
510
511	/* map the logical cpu id to cpu MPIDR */
512	cpu_logical_map(cpu_count) = hwid;
513
514	/*
515	 * Set-up the ACPI parking protocol cpu entries
516	 * while initializing the cpu_logical_map to
517	 * avoid parsing MADT entries multiple times for
518	 * nothing (ie a valid cpu_logical_map entry should
519	 * contain a valid parking protocol data set to
520	 * initialize the cpu if the parking protocol is
521	 * the only available enable method).
522	 */
523	acpi_set_mailbox_entry(cpu_count, processor);
524
525	cpu_count++;
526}
527
528static int __init
529acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
530			     const unsigned long end)
531{
532	struct acpi_madt_generic_interrupt *processor;
533
534	processor = (struct acpi_madt_generic_interrupt *)header;
535	if (BAD_MADT_GICC_ENTRY(processor, end))
536		return -EINVAL;
537
538	acpi_table_print_madt_entry(header);
539
540	acpi_map_gic_cpu_interface(processor);
541
542	return 0;
543}
544#else
545#define acpi_table_parse_madt(...)	do { } while (0)
546#endif
547
548/*
549 * Enumerate the possible CPU set from the device tree and build the
550 * cpu logical map array containing MPIDR values related to logical
551 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
552 */
553static void __init of_parse_and_init_cpus(void)
554{
555	struct device_node *dn = NULL;
556
557	while ((dn = of_find_node_by_type(dn, "cpu"))) {
558		u64 hwid = of_get_cpu_mpidr(dn);
559
560		if (hwid == INVALID_HWID)
561			goto next;
562
563		if (is_mpidr_duplicate(cpu_count, hwid)) {
564			pr_err("%s: duplicate cpu reg properties in the DT\n",
565				dn->full_name);
566			goto next;
567		}
568
569		/*
570		 * The numbering scheme requires that the boot CPU
571		 * must be assigned logical id 0. Record it so that
572		 * the logical map built from DT is validated and can
573		 * be used.
574		 */
575		if (hwid == cpu_logical_map(0)) {
576			if (bootcpu_valid) {
577				pr_err("%s: duplicate boot cpu reg property in DT\n",
578					dn->full_name);
579				goto next;
580			}
581
582			bootcpu_valid = true;
583
584			/*
585			 * cpu_logical_map has already been
586			 * initialized and the boot cpu doesn't need
587			 * the enable-method so continue without
588			 * incrementing cpu.
589			 */
590			continue;
591		}
592
593		if (cpu_count >= NR_CPUS)
594			goto next;
595
596		pr_debug("cpu logical map 0x%llx\n", hwid);
597		cpu_logical_map(cpu_count) = hwid;
598next:
599		cpu_count++;
600	}
601}
602
603/*
604 * Enumerate the possible CPU set from the device tree or ACPI and build the
605 * cpu logical map array containing MPIDR values related to logical
606 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
607 */
608void __init smp_init_cpus(void)
609{
610	int i;
611
612	if (acpi_disabled)
613		of_parse_and_init_cpus();
614	else
615		/*
616		 * do a walk of MADT to determine how many CPUs
617		 * we have including disabled CPUs, and get information
618		 * we need for SMP init
619		 */
620		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
621				      acpi_parse_gic_cpu_interface, 0);
622
623	if (cpu_count > NR_CPUS)
624		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
625			cpu_count, NR_CPUS);
626
627	if (!bootcpu_valid) {
628		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
629		return;
630	}
631
632	/*
633	 * We need to set the cpu_logical_map entries before enabling
634	 * the cpus so that cpu processor description entries (DT cpu nodes
635	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
636	 * with entries in cpu_logical_map while initializing the cpus.
637	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
638	 */
639	for (i = 1; i < NR_CPUS; i++) {
640		if (cpu_logical_map(i) != INVALID_HWID) {
641			if (smp_cpu_setup(i))
642				cpu_logical_map(i) = INVALID_HWID;
643		}
644	}
645}
646
647void __init smp_prepare_cpus(unsigned int max_cpus)
648{
649	int err;
650	unsigned int cpu, ncores = num_possible_cpus();
651
652	init_cpu_topology();
653
654	smp_store_cpu_info(smp_processor_id());
655
656	/*
657	 * are we trying to boot more cores than exist?
658	 */
659	if (max_cpus > ncores)
660		max_cpus = ncores;
661
662	/* Don't bother if we're effectively UP */
663	if (max_cpus <= 1)
664		return;
665
666	/*
667	 * Initialise the present map (which describes the set of CPUs
668	 * actually populated at the present time) and release the
669	 * secondaries from the bootloader.
670	 *
671	 * Make sure we online at most (max_cpus - 1) additional CPUs.
672	 */
673	max_cpus--;
674	for_each_possible_cpu(cpu) {
675		if (max_cpus == 0)
676			break;
677
678		if (cpu == smp_processor_id())
679			continue;
680
681		if (!cpu_ops[cpu])
682			continue;
683
684		err = cpu_ops[cpu]->cpu_prepare(cpu);
685		if (err)
686			continue;
687
688		set_cpu_present(cpu, true);
689		max_cpus--;
690	}
691}
692
693void (*__smp_cross_call)(const struct cpumask *, unsigned int);
694
695void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
696{
697	__smp_cross_call = fn;
698}
699
700static const char *ipi_types[NR_IPI] __tracepoint_string = {
701#define S(x,s)	[x] = s
702	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
703	S(IPI_CALL_FUNC, "Function call interrupts"),
704	S(IPI_CPU_STOP, "CPU stop interrupts"),
705	S(IPI_TIMER, "Timer broadcast interrupts"),
706	S(IPI_IRQ_WORK, "IRQ work interrupts"),
707	S(IPI_WAKEUP, "CPU wake-up interrupts"),
708};
709
710static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
711{
712	trace_ipi_raise(target, ipi_types[ipinr]);
713	__smp_cross_call(target, ipinr);
714}
715
716void show_ipi_list(struct seq_file *p, int prec)
717{
718	unsigned int cpu, i;
719
720	for (i = 0; i < NR_IPI; i++) {
721		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
722			   prec >= 4 ? " " : "");
723		for_each_online_cpu(cpu)
724			seq_printf(p, "%10u ",
725				   __get_irq_stat(cpu, ipi_irqs[i]));
726		seq_printf(p, "      %s\n", ipi_types[i]);
727	}
728}
729
730u64 smp_irq_stat_cpu(unsigned int cpu)
731{
732	u64 sum = 0;
733	int i;
734
735	for (i = 0; i < NR_IPI; i++)
736		sum += __get_irq_stat(cpu, ipi_irqs[i]);
737
738	return sum;
739}
740
741void arch_send_call_function_ipi_mask(const struct cpumask *mask)
742{
743	smp_cross_call(mask, IPI_CALL_FUNC);
744}
745
746void arch_send_call_function_single_ipi(int cpu)
747{
748	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
749}
750
751#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
752void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
753{
754	smp_cross_call(mask, IPI_WAKEUP);
755}
756#endif
757
758#ifdef CONFIG_IRQ_WORK
759void arch_irq_work_raise(void)
760{
761	if (__smp_cross_call)
762		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
763}
764#endif
765
766static DEFINE_RAW_SPINLOCK(stop_lock);
767
768/*
769 * ipi_cpu_stop - handle IPI from smp_send_stop()
770 */
771static void ipi_cpu_stop(unsigned int cpu)
772{
773	if (system_state == SYSTEM_BOOTING ||
774	    system_state == SYSTEM_RUNNING) {
775		raw_spin_lock(&stop_lock);
776		pr_crit("CPU%u: stopping\n", cpu);
777		dump_stack();
778		raw_spin_unlock(&stop_lock);
779	}
780
781	set_cpu_online(cpu, false);
782
783	local_irq_disable();
784
785	while (1)
786		cpu_relax();
787}
788
789/*
790 * Main handler for inter-processor interrupts
791 */
792void handle_IPI(int ipinr, struct pt_regs *regs)
793{
794	unsigned int cpu = smp_processor_id();
795	struct pt_regs *old_regs = set_irq_regs(regs);
796
797	if ((unsigned)ipinr < NR_IPI) {
798		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
799		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
800	}
801
802	switch (ipinr) {
803	case IPI_RESCHEDULE:
804		scheduler_ipi();
805		break;
806
807	case IPI_CALL_FUNC:
808		irq_enter();
809		generic_smp_call_function_interrupt();
810		irq_exit();
811		break;
812
813	case IPI_CPU_STOP:
814		irq_enter();
815		ipi_cpu_stop(cpu);
816		irq_exit();
817		break;
818
819#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
820	case IPI_TIMER:
821		irq_enter();
822		tick_receive_broadcast();
823		irq_exit();
824		break;
825#endif
826
827#ifdef CONFIG_IRQ_WORK
828	case IPI_IRQ_WORK:
829		irq_enter();
830		irq_work_run();
831		irq_exit();
832		break;
833#endif
834
835#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
836	case IPI_WAKEUP:
837		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
838			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
839			  cpu);
840		break;
841#endif
842
843	default:
844		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
845		break;
846	}
847
848	if ((unsigned)ipinr < NR_IPI)
849		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
850	set_irq_regs(old_regs);
851}
852
853void smp_send_reschedule(int cpu)
854{
855	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
856}
857
858#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
859void tick_broadcast(const struct cpumask *mask)
860{
861	smp_cross_call(mask, IPI_TIMER);
862}
863#endif
864
865void smp_send_stop(void)
866{
867	unsigned long timeout;
868
869	if (num_online_cpus() > 1) {
870		cpumask_t mask;
871
872		cpumask_copy(&mask, cpu_online_mask);
873		cpumask_clear_cpu(smp_processor_id(), &mask);
874
875		smp_cross_call(&mask, IPI_CPU_STOP);
876	}
877
878	/* Wait up to one second for other CPUs to stop */
879	timeout = USEC_PER_SEC;
880	while (num_online_cpus() > 1 && timeout--)
881		udelay(1);
882
883	if (num_online_cpus() > 1)
884		pr_warning("SMP: failed to stop secondary CPUs\n");
885}
886
887/*
888 * not supported here
889 */
890int setup_profiling_timer(unsigned int multiplier)
891{
892	return -EINVAL;
893}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * SMP initialisation and IPI support
   4 * Based on arch/arm/kernel/smp.c
   5 *
   6 * Copyright (C) 2012 ARM Ltd.
   7 */
   8
   9#include <linux/acpi.h>
  10#include <linux/arm_sdei.h>
  11#include <linux/delay.h>
  12#include <linux/init.h>
  13#include <linux/spinlock.h>
  14#include <linux/sched/mm.h>
  15#include <linux/sched/hotplug.h>
  16#include <linux/sched/task_stack.h>
  17#include <linux/interrupt.h>
  18#include <linux/cache.h>
  19#include <linux/profile.h>
  20#include <linux/errno.h>
  21#include <linux/mm.h>
  22#include <linux/err.h>
  23#include <linux/cpu.h>
  24#include <linux/smp.h>
  25#include <linux/seq_file.h>
  26#include <linux/irq.h>
  27#include <linux/irqchip/arm-gic-v3.h>
  28#include <linux/percpu.h>
  29#include <linux/clockchips.h>
  30#include <linux/completion.h>
  31#include <linux/of.h>
  32#include <linux/irq_work.h>
  33#include <linux/kernel_stat.h>
  34#include <linux/kexec.h>
  35#include <linux/kgdb.h>
  36#include <linux/kvm_host.h>
  37#include <linux/nmi.h>
  38
  39#include <asm/alternative.h>
  40#include <asm/atomic.h>
  41#include <asm/cacheflush.h>
  42#include <asm/cpu.h>
  43#include <asm/cputype.h>
  44#include <asm/cpu_ops.h>
  45#include <asm/daifflags.h>
  46#include <asm/kvm_mmu.h>
  47#include <asm/mmu_context.h>
  48#include <asm/numa.h>
  49#include <asm/processor.h>
  50#include <asm/smp_plat.h>
  51#include <asm/sections.h>
  52#include <asm/tlbflush.h>
  53#include <asm/ptrace.h>
  54#include <asm/virt.h>
  55
  56#include <trace/events/ipi.h>
  57
  58/*
  59 * as from 2.5, kernels no longer have an init_tasks structure
  60 * so we need some other way of telling a new secondary core
  61 * where to place its SVC stack
  62 */
  63struct secondary_data secondary_data;
  64/* Number of CPUs which aren't online, but looping in kernel text. */
  65static int cpus_stuck_in_kernel;
  66
  67enum ipi_msg_type {
  68	IPI_RESCHEDULE,
  69	IPI_CALL_FUNC,
  70	IPI_CPU_STOP,
  71	IPI_CPU_STOP_NMI,
  72	IPI_TIMER,
  73	IPI_IRQ_WORK,
  74	NR_IPI,
  75	/*
  76	 * Any enum >= NR_IPI and < MAX_IPI is special and not traceable
  77	 * with trace_ipi_*
  78	 */
  79	IPI_CPU_BACKTRACE = NR_IPI,
  80	IPI_KGDB_ROUNDUP,
  81	MAX_IPI
  82};
  83
  84static int ipi_irq_base __ro_after_init;
  85static int nr_ipi __ro_after_init = NR_IPI;
  86static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
  87
  88static bool crash_stop;
  89
  90static void ipi_setup(int cpu);
  91
  92#ifdef CONFIG_HOTPLUG_CPU
  93static void ipi_teardown(int cpu);
  94static int op_cpu_kill(unsigned int cpu);
  95#else
  96static inline int op_cpu_kill(unsigned int cpu)
  97{
  98	return -ENOSYS;
  99}
 100#endif
 101
 102
 103/*
 104 * Boot a secondary CPU, and assign it the specified idle task.
 105 * This also gives us the initial stack to use for this CPU.
 106 */
 107static int boot_secondary(unsigned int cpu, struct task_struct *idle)
 108{
 109	const struct cpu_operations *ops = get_cpu_ops(cpu);
 110
 111	if (ops->cpu_boot)
 112		return ops->cpu_boot(cpu);
 113
 114	return -EOPNOTSUPP;
 115}
 116
 117static DECLARE_COMPLETION(cpu_running);
 118
 119int __cpu_up(unsigned int cpu, struct task_struct *idle)
 120{
 121	int ret;
 122	long status;
 123
 124	/*
 125	 * We need to tell the secondary core where to find its stack and the
 126	 * page tables.
 127	 */
 128	secondary_data.task = idle;
 129	update_cpu_boot_status(CPU_MMU_OFF);
 130
 131	/* Now bring the CPU into our world */
 132	ret = boot_secondary(cpu, idle);
 133	if (ret) {
 134		if (ret != -EPERM)
 135			pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 136		return ret;
 137	}
 138
 139	/*
 140	 * CPU was successfully started, wait for it to come online or
 141	 * time out.
 142	 */
 143	wait_for_completion_timeout(&cpu_running,
 144				    msecs_to_jiffies(5000));
 145	if (cpu_online(cpu))
 146		return 0;
 147
 148	pr_crit("CPU%u: failed to come online\n", cpu);
 149	secondary_data.task = NULL;
 150	status = READ_ONCE(secondary_data.status);
 151	if (status == CPU_MMU_OFF)
 152		status = READ_ONCE(__early_cpu_boot_status);
 153
 154	switch (status & CPU_BOOT_STATUS_MASK) {
 155	default:
 156		pr_err("CPU%u: failed in unknown state : 0x%lx\n",
 157		       cpu, status);
 158		cpus_stuck_in_kernel++;
 159		break;
 160	case CPU_KILL_ME:
 161		if (!op_cpu_kill(cpu)) {
 162			pr_crit("CPU%u: died during early boot\n", cpu);
 163			break;
 164		}
 165		pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
 166		fallthrough;
 167	case CPU_STUCK_IN_KERNEL:
 168		pr_crit("CPU%u: is stuck in kernel\n", cpu);
 169		if (status & CPU_STUCK_REASON_52_BIT_VA)
 170			pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
 171		if (status & CPU_STUCK_REASON_NO_GRAN) {
 172			pr_crit("CPU%u: does not support %luK granule\n",
 173				cpu, PAGE_SIZE / SZ_1K);
 174		}
 175		cpus_stuck_in_kernel++;
 176		break;
 177	case CPU_PANIC_KERNEL:
 178		panic("CPU%u detected unsupported configuration\n", cpu);
 179	}
 180
 181	return -EIO;
 182}
 183
 184static void init_gic_priority_masking(void)
 185{
 186	u32 cpuflags;
 187
 188	if (WARN_ON(!gic_enable_sre()))
 189		return;
 190
 191	cpuflags = read_sysreg(daif);
 192
 193	WARN_ON(!(cpuflags & PSR_I_BIT));
 194	WARN_ON(!(cpuflags & PSR_F_BIT));
 195
 196	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 197}
 198
 199/*
 200 * This is the secondary CPU boot entry.  We're using this CPU's
 201 * idle thread stack, but a set of temporary page tables.
 202 */
 203asmlinkage notrace void secondary_start_kernel(void)
 204{
 205	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 206	struct mm_struct *mm = &init_mm;
 207	const struct cpu_operations *ops;
 208	unsigned int cpu = smp_processor_id();
 209
 210	/*
 211	 * All kernel threads share the same mm context; grab a
 212	 * reference and switch to it.
 213	 */
 214	mmgrab(mm);
 215	current->active_mm = mm;
 216
 217	/*
 218	 * TTBR0 is only used for the identity mapping at this stage. Make it
 219	 * point to zero page to avoid speculatively fetching new entries.
 220	 */
 221	cpu_uninstall_idmap();
 222
 223	if (system_uses_irq_prio_masking())
 224		init_gic_priority_masking();
 225
 226	rcutree_report_cpu_starting(cpu);
 227	trace_hardirqs_off();
 228
 229	/*
 230	 * If the system has established the capabilities, make sure
 231	 * this CPU ticks all of those. If it doesn't, the CPU will
 232	 * fail to come online.
 233	 */
 234	check_local_cpu_capabilities();
 235
 236	ops = get_cpu_ops(cpu);
 237	if (ops->cpu_postboot)
 238		ops->cpu_postboot();
 239
 240	/*
 241	 * Log the CPU info before it is marked online and might get read.
 242	 */
 243	cpuinfo_store_cpu();
 244	store_cpu_topology(cpu);
 245
 246	/*
 247	 * Enable GIC and timers.
 248	 */
 249	notify_cpu_starting(cpu);
 250
 251	ipi_setup(cpu);
 252
 253	numa_add_cpu(cpu);
 254
 255	/*
 256	 * OK, now it's safe to let the boot CPU continue.  Wait for
 257	 * the CPU migration code to notice that the CPU is online
 258	 * before we continue.
 259	 */
 260	pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
 261					 cpu, (unsigned long)mpidr,
 262					 read_cpuid_id());
 263	update_cpu_boot_status(CPU_BOOT_SUCCESS);
 264	set_cpu_online(cpu, true);
 265	complete(&cpu_running);
 266
 267	/*
 268	 * Secondary CPUs enter the kernel with all DAIF exceptions masked.
 269	 *
 270	 * As with setup_arch() we must unmask Debug and SError exceptions, and
 271	 * as the root irqchip has already been detected and initialized we can
 272	 * unmask IRQ and FIQ at the same time.
 273	 */
 274	local_daif_restore(DAIF_PROCCTX);
 275
 276	/*
 277	 * OK, it's off to the idle thread for us
 278	 */
 279	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 280}
 281
 282#ifdef CONFIG_HOTPLUG_CPU
 283static int op_cpu_disable(unsigned int cpu)
 284{
 285	const struct cpu_operations *ops = get_cpu_ops(cpu);
 286
 287	/*
 288	 * If we don't have a cpu_die method, abort before we reach the point
 289	 * of no return. CPU0 may not have a cpu_ops, so test for it.
 290	 */
 291	if (!ops || !ops->cpu_die)
 292		return -EOPNOTSUPP;
 293
 294	/*
 295	 * We may need to abort a hot unplug for some other mechanism-specific
 296	 * reason.
 297	 */
 298	if (ops->cpu_disable)
 299		return ops->cpu_disable(cpu);
 300
 301	return 0;
 302}
 303
 304/*
 305 * __cpu_disable runs on the processor to be shutdown.
 306 */
 307int __cpu_disable(void)
 308{
 309	unsigned int cpu = smp_processor_id();
 310	int ret;
 311
 312	ret = op_cpu_disable(cpu);
 313	if (ret)
 314		return ret;
 315
 316	remove_cpu_topology(cpu);
 317	numa_remove_cpu(cpu);
 318
 319	/*
 320	 * Take this CPU offline.  Once we clear this, we can't return,
 321	 * and we must not schedule until we're ready to give up the cpu.
 322	 */
 323	set_cpu_online(cpu, false);
 324	ipi_teardown(cpu);
 325
 326	/*
 327	 * OK - migrate IRQs away from this CPU
 328	 */
 329	irq_migrate_all_off_this_cpu();
 330
 331	return 0;
 332}
 333
 334static int op_cpu_kill(unsigned int cpu)
 335{
 336	const struct cpu_operations *ops = get_cpu_ops(cpu);
 337
 338	/*
 339	 * If we have no means of synchronising with the dying CPU, then assume
 340	 * that it is really dead. We can only wait for an arbitrary length of
 341	 * time and hope that it's dead, so let's skip the wait and just hope.
 342	 */
 343	if (!ops->cpu_kill)
 344		return 0;
 345
 346	return ops->cpu_kill(cpu);
 347}
 348
 349/*
 350 * Called on the thread which is asking for a CPU to be shutdown after the
 351 * shutdown completed.
 352 */
 353void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
 354{
 355	int err;
 356
 357	pr_debug("CPU%u: shutdown\n", cpu);
 358
 359	/*
 360	 * Now that the dying CPU is beyond the point of no return w.r.t.
 361	 * in-kernel synchronisation, try to get the firmware to help us to
 362	 * verify that it has really left the kernel before we consider
 363	 * clobbering anything it might still be using.
 364	 */
 365	err = op_cpu_kill(cpu);
 366	if (err)
 367		pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
 368}
 369
 370/*
 371 * Called from the idle thread for the CPU which has been shutdown.
 372 *
 373 */
 374void __noreturn cpu_die(void)
 375{
 376	unsigned int cpu = smp_processor_id();
 377	const struct cpu_operations *ops = get_cpu_ops(cpu);
 378
 379	idle_task_exit();
 380
 381	local_daif_mask();
 382
 383	/* Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose of */
 384	cpuhp_ap_report_dead();
 385
 386	/*
 387	 * Actually shutdown the CPU. This must never fail. The specific hotplug
 388	 * mechanism must perform all required cache maintenance to ensure that
 389	 * no dirty lines are lost in the process of shutting down the CPU.
 390	 */
 391	ops->cpu_die(cpu);
 392
 393	BUG();
 394}
 395#endif
 396
 397static void __cpu_try_die(int cpu)
 398{
 399#ifdef CONFIG_HOTPLUG_CPU
 400	const struct cpu_operations *ops = get_cpu_ops(cpu);
 401
 402	if (ops && ops->cpu_die)
 403		ops->cpu_die(cpu);
 404#endif
 405}
 406
 407/*
 408 * Kill the calling secondary CPU, early in bringup before it is turned
 409 * online.
 410 */
 411void __noreturn cpu_die_early(void)
 412{
 413	int cpu = smp_processor_id();
 414
 415	pr_crit("CPU%d: will not boot\n", cpu);
 416
 417	/* Mark this CPU absent */
 418	set_cpu_present(cpu, 0);
 419	rcutree_report_cpu_dead();
 420
 421	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
 422		update_cpu_boot_status(CPU_KILL_ME);
 423		__cpu_try_die(cpu);
 424	}
 425
 426	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);
 427
 428	cpu_park_loop();
 429}
 430
 431static void __init hyp_mode_check(void)
 432{
 433	if (is_hyp_mode_available())
 434		pr_info("CPU: All CPU(s) started at EL2\n");
 435	else if (is_hyp_mode_mismatched())
 436		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
 437			   "CPU: CPUs started in inconsistent modes");
 438	else
 439		pr_info("CPU: All CPU(s) started at EL1\n");
 440	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
 441		kvm_compute_layout();
 442		kvm_apply_hyp_relocations();
 443	}
 444}
 445
 446void __init smp_cpus_done(unsigned int max_cpus)
 447{
 448	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
 449	hyp_mode_check();
 450	setup_system_features();
 451	setup_user_features();
 452	mark_linear_text_alias_ro();
 453}
 454
 455void __init smp_prepare_boot_cpu(void)
 456{
 457	/*
 458	 * The runtime per-cpu areas have been allocated by
 459	 * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
 460	 * freed shortly, so we must move over to the runtime per-cpu area.
 461	 */
 462	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 463
 464	cpuinfo_store_boot_cpu();
 465	setup_boot_cpu_features();
 466
 467	/* Conditionally switch to GIC PMR for interrupt masking */
 468	if (system_uses_irq_prio_masking())
 469		init_gic_priority_masking();
 470
 471	kasan_init_hw_tags();
 472	/* Init percpu seeds for random tags after cpus are set up. */
 473	kasan_init_sw_tags();
 474}
 475
 476/*
 477 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 478 * entries and check for duplicates. If any is found just ignore the
 479 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 480 * matching valid MPIDR values.
 481 */
 482static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
 483{
 484	unsigned int i;
 485
 486	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
 487		if (cpu_logical_map(i) == hwid)
 488			return true;
 489	return false;
 490}
 491
 492/*
 493 * Initialize cpu operations for a logical cpu and
 494 * set it in the possible mask on success
 495 */
 496static int __init smp_cpu_setup(int cpu)
 497{
 498	const struct cpu_operations *ops;
 499
 500	if (init_cpu_ops(cpu))
 501		return -ENODEV;
 502
 503	ops = get_cpu_ops(cpu);
 504	if (ops->cpu_init(cpu))
 505		return -ENODEV;
 506
 507	set_cpu_possible(cpu, true);
 508
 509	return 0;
 510}
 511
 512static bool bootcpu_valid __initdata;
 513static unsigned int cpu_count = 1;
 514
 515int arch_register_cpu(int cpu)
 516{
 517	acpi_handle acpi_handle = acpi_get_processor_handle(cpu);
 518	struct cpu *c = &per_cpu(cpu_devices, cpu);
 519
 520	if (!acpi_disabled && !acpi_handle &&
 521	    IS_ENABLED(CONFIG_ACPI_HOTPLUG_CPU))
 522		return -EPROBE_DEFER;
 523
 524#ifdef CONFIG_ACPI_HOTPLUG_CPU
 525	/* For now block anything that looks like physical CPU Hotplug */
 526	if (invalid_logical_cpuid(cpu) || !cpu_present(cpu)) {
 527		pr_err_once("Changing CPU present bit is not supported\n");
 528		return -ENODEV;
 529	}
 530#endif
 531
 532	/*
 533	 * Availability of the acpi handle is sufficient to establish
 534	 * that _STA has already been checked. No need to recheck here.
 535	 */
 536	c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
 537
 538	return register_cpu(c, cpu);
 539}
 540
 541#ifdef CONFIG_ACPI_HOTPLUG_CPU
 542void arch_unregister_cpu(int cpu)
 543{
 544	acpi_handle acpi_handle = acpi_get_processor_handle(cpu);
 545	struct cpu *c = &per_cpu(cpu_devices, cpu);
 546	acpi_status status;
 547	unsigned long long sta;
 548
 549	if (!acpi_handle) {
 550		pr_err_once("Removing a CPU without associated ACPI handle\n");
 551		return;
 552	}
 553
 554	status = acpi_evaluate_integer(acpi_handle, "_STA", NULL, &sta);
 555	if (ACPI_FAILURE(status))
 556		return;
 557
 558	/* For now do not allow anything that looks like physical CPU HP */
 559	if (cpu_present(cpu) && !(sta & ACPI_STA_DEVICE_PRESENT)) {
 560		pr_err_once("Changing CPU present bit is not supported\n");
 561		return;
 562	}
 563
 564	unregister_cpu(c);
 565}
 566#endif /* CONFIG_ACPI_HOTPLUG_CPU */
 567
 568#ifdef CONFIG_ACPI
 569static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];
 570
 571struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
 572{
 573	return &cpu_madt_gicc[cpu];
 574}
 575EXPORT_SYMBOL_GPL(acpi_cpu_get_madt_gicc);
 576
 577/*
 578 * acpi_map_gic_cpu_interface - parse processor MADT entry
 579 *
 580 * Carry out sanity checks on MADT processor entry and initialize
 581 * cpu_logical_map on success
 582 */
 583static void __init
 584acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 585{
 586	u64 hwid = processor->arm_mpidr;
 587
 588	if (!(processor->flags &
 589	      (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE))) {
 590		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
 591		return;
 592	}
 593
 594	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
 595		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
 596		return;
 597	}
 598
 599	if (is_mpidr_duplicate(cpu_count, hwid)) {
 600		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
 601		return;
 602	}
 603
 604	/* Check if GICC structure of boot CPU is available in the MADT */
 605	if (cpu_logical_map(0) == hwid) {
 606		if (bootcpu_valid) {
 607			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
 608			       hwid);
 609			return;
 610		}
 611		bootcpu_valid = true;
 612		cpu_madt_gicc[0] = *processor;
 613		return;
 614	}
 615
 616	if (cpu_count >= NR_CPUS)
 617		return;
 618
 619	/* map the logical cpu id to cpu MPIDR */
 620	set_cpu_logical_map(cpu_count, hwid);
 621
 622	cpu_madt_gicc[cpu_count] = *processor;
 623
 624	/*
 625	 * Set-up the ACPI parking protocol cpu entries
 626	 * while initializing the cpu_logical_map to
 627	 * avoid parsing MADT entries multiple times for
 628	 * nothing (ie a valid cpu_logical_map entry should
 629	 * contain a valid parking protocol data set to
 630	 * initialize the cpu if the parking protocol is
 631	 * the only available enable method).
 632	 */
 633	acpi_set_mailbox_entry(cpu_count, processor);
 634
 635	cpu_count++;
 636}
 637
 638static int __init
 639acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
 640			     const unsigned long end)
 641{
 642	struct acpi_madt_generic_interrupt *processor;
 643
 644	processor = (struct acpi_madt_generic_interrupt *)header;
 645	if (BAD_MADT_GICC_ENTRY(processor, end))
 646		return -EINVAL;
 647
 648	acpi_table_print_madt_entry(&header->common);
 649
 650	acpi_map_gic_cpu_interface(processor);
 651
 652	return 0;
 653}
 654
 655static void __init acpi_parse_and_init_cpus(void)
 656{
 657	int i;
 658
 659	/*
 660	 * do a walk of MADT to determine how many CPUs
 661	 * we have including disabled CPUs, and get information
 662	 * we need for SMP init.
 663	 */
 664	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
 665				      acpi_parse_gic_cpu_interface, 0);
 666
 667	/*
 668	 * In ACPI, SMP and CPU NUMA information is provided in separate
 669	 * static tables, namely the MADT and the SRAT.
 670	 *
 671	 * Thus, it is simpler to first create the cpu logical map through
 672	 * an MADT walk and then map the logical cpus to their node ids
 673	 * as separate steps.
 674	 */
 675	acpi_map_cpus_to_nodes();
 676
 677	for (i = 0; i < nr_cpu_ids; i++)
 678		early_map_cpu_to_node(i, acpi_numa_get_nid(i));
 679}
 680#else
 681#define acpi_parse_and_init_cpus(...)	do { } while (0)
 682#endif
 683
 684/*
 685 * Enumerate the possible CPU set from the device tree and build the
 686 * cpu logical map array containing MPIDR values related to logical
 687 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 688 */
 689static void __init of_parse_and_init_cpus(void)
 690{
 691	struct device_node *dn;
 692
 693	for_each_of_cpu_node(dn) {
 694		u64 hwid = of_get_cpu_hwid(dn, 0);
 695
 696		if (hwid & ~MPIDR_HWID_BITMASK)
 697			goto next;
 698
 699		if (is_mpidr_duplicate(cpu_count, hwid)) {
 700			pr_err("%pOF: duplicate cpu reg properties in the DT\n",
 701				dn);
 702			goto next;
 703		}
 704
 705		/*
 706		 * The numbering scheme requires that the boot CPU
 707		 * must be assigned logical id 0. Record it so that
 708		 * the logical map built from DT is validated and can
 709		 * be used.
 710		 */
 711		if (hwid == cpu_logical_map(0)) {
 712			if (bootcpu_valid) {
 713				pr_err("%pOF: duplicate boot cpu reg property in DT\n",
 714					dn);
 715				goto next;
 716			}
 717
 718			bootcpu_valid = true;
 719			early_map_cpu_to_node(0, of_node_to_nid(dn));
 720
 721			/*
 722			 * cpu_logical_map has already been
 723			 * initialized and the boot cpu doesn't need
 724			 * the enable-method so continue without
 725			 * incrementing cpu.
 726			 */
 727			continue;
 728		}
 729
 730		if (cpu_count >= NR_CPUS)
 731			goto next;
 732
 733		pr_debug("cpu logical map 0x%llx\n", hwid);
 734		set_cpu_logical_map(cpu_count, hwid);
 735
 736		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
 737next:
 738		cpu_count++;
 739	}
 740}
 741
 742/*
 743 * Enumerate the possible CPU set from the device tree or ACPI and build the
 744 * cpu logical map array containing MPIDR values related to logical
 745 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 746 */
 747void __init smp_init_cpus(void)
 748{
 749	int i;
 750
 751	if (acpi_disabled)
 752		of_parse_and_init_cpus();
 753	else
 754		acpi_parse_and_init_cpus();
 755
 756	if (cpu_count > nr_cpu_ids)
 757		pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
 758			cpu_count, nr_cpu_ids);
 759
 760	if (!bootcpu_valid) {
 761		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
 762		return;
 763	}
 764
 765	/*
 766	 * We need to set the cpu_logical_map entries before enabling
 767	 * the cpus so that cpu processor description entries (DT cpu nodes
 768	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
 769	 * with entries in cpu_logical_map while initializing the cpus.
 770	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
 771	 */
 772	for (i = 1; i < nr_cpu_ids; i++) {
 773		if (cpu_logical_map(i) != INVALID_HWID) {
 774			if (smp_cpu_setup(i))
 775				set_cpu_logical_map(i, INVALID_HWID);
 776		}
 777	}
 778}
 779
 780void __init smp_prepare_cpus(unsigned int max_cpus)
 781{
 782	const struct cpu_operations *ops;
 783	int err;
 784	unsigned int cpu;
 785	unsigned int this_cpu;
 786
 787	init_cpu_topology();
 788
 789	this_cpu = smp_processor_id();
 790	store_cpu_topology(this_cpu);
 791	numa_store_cpu_info(this_cpu);
 792	numa_add_cpu(this_cpu);
 793
 794	/*
 795	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
 796	 * secondary CPUs present.
 797	 */
 798	if (max_cpus == 0)
 799		return;
 800
 801	/*
 802	 * Initialise the present map (which describes the set of CPUs
 803	 * actually populated at the present time) and release the
 804	 * secondaries from the bootloader.
 805	 */
 806	for_each_possible_cpu(cpu) {
 807
 808		if (cpu == smp_processor_id())
 809			continue;
 810
 811		ops = get_cpu_ops(cpu);
 812		if (!ops)
 813			continue;
 814
 815		err = ops->cpu_prepare(cpu);
 816		if (err)
 817			continue;
 818
 819		set_cpu_present(cpu, true);
 820		numa_store_cpu_info(cpu);
 821	}
 822}
 823
 824static const char *ipi_types[MAX_IPI] __tracepoint_string = {
 825	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
 826	[IPI_CALL_FUNC]		= "Function call interrupts",
 827	[IPI_CPU_STOP]		= "CPU stop interrupts",
 828	[IPI_CPU_STOP_NMI]	= "CPU stop NMIs",
 829	[IPI_TIMER]		= "Timer broadcast interrupts",
 830	[IPI_IRQ_WORK]		= "IRQ work interrupts",
 831	[IPI_CPU_BACKTRACE]	= "CPU backtrace interrupts",
 832	[IPI_KGDB_ROUNDUP]	= "KGDB roundup interrupts",
 833};
 834
 835static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 836
 837unsigned long irq_err_count;
 838
 839int arch_show_interrupts(struct seq_file *p, int prec)
 840{
 841	unsigned int cpu, i;
 842
 843	for (i = 0; i < MAX_IPI; i++) {
 844		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 845			   prec >= 4 ? " " : "");
 846		for_each_online_cpu(cpu)
 847			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
 848		seq_printf(p, "      %s\n", ipi_types[i]);
 849	}
 850
 851	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 852	return 0;
 853}
 854
 855void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 856{
 857	smp_cross_call(mask, IPI_CALL_FUNC);
 858}
 859
 860void arch_send_call_function_single_ipi(int cpu)
 861{
 862	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
 863}
 864
 865#ifdef CONFIG_IRQ_WORK
 866void arch_irq_work_raise(void)
 867{
 868	smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
 869}
 870#endif
 871
 872static void __noreturn local_cpu_stop(unsigned int cpu)
 873{
 874	set_cpu_online(cpu, false);
 875
 876	local_daif_mask();
 877	sdei_mask_local_cpu();
 878	cpu_park_loop();
 879}
 880
 881/*
 882 * We need to implement panic_smp_self_stop() for parallel panic() calls, so
 883 * that cpu_online_mask gets correctly updated and smp_send_stop() can skip
 884 * CPUs that have already stopped themselves.
 885 */
 886void __noreturn panic_smp_self_stop(void)
 887{
 888	local_cpu_stop(smp_processor_id());
 889}
 890
 891static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 892{
 893#ifdef CONFIG_KEXEC_CORE
 894	/*
 895	 * Use local_daif_mask() instead of local_irq_disable() to make sure
 896	 * that pseudo-NMIs are disabled. The "crash stop" code starts with
 897	 * an IRQ and falls back to NMI (which might be pseudo). If the IRQ
 898	 * finally goes through right as we're timing out then the NMI could
 899	 * interrupt us. It's better to prevent the NMI and let the IRQ
 900	 * finish since the pt_regs will be better.
 901	 */
 902	local_daif_mask();
 903
 904	crash_save_cpu(regs, cpu);
 905
 906	set_cpu_online(cpu, false);
 907
 908	sdei_mask_local_cpu();
 909
 910	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
 911		__cpu_try_die(cpu);
 912
 913	/* just in case */
 914	cpu_park_loop();
 915#else
 916	BUG();
 917#endif
 918}
 919
 920static void arm64_backtrace_ipi(cpumask_t *mask)
 921{
 922	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
 923}
 924
 925void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
 926{
 927	/*
 928	 * NOTE: though nmi_trigger_cpumask_backtrace() has "nmi_" in the name,
 929	 * nothing about it truly needs to be implemented using an NMI, it's
 930	 * just that it's _allowed_ to work with NMIs. If ipi_should_be_nmi()
 931	 * returned false our backtrace attempt will just use a regular IPI.
 932	 */
 933	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arm64_backtrace_ipi);
 934}
 935
 936#ifdef CONFIG_KGDB
 937void kgdb_roundup_cpus(void)
 938{
 939	int this_cpu = raw_smp_processor_id();
 940	int cpu;
 941
 942	for_each_online_cpu(cpu) {
 943		/* No need to roundup ourselves */
 944		if (cpu == this_cpu)
 945			continue;
 946
 947		__ipi_send_single(ipi_desc[IPI_KGDB_ROUNDUP], cpu);
 948	}
 949}
 950#endif
 951
 952/*
 953 * Main handler for inter-processor interrupts
 954 */
 955static void do_handle_IPI(int ipinr)
 956{
 957	unsigned int cpu = smp_processor_id();
 958
 959	if ((unsigned)ipinr < NR_IPI)
 960		trace_ipi_entry(ipi_types[ipinr]);
 961
 962	switch (ipinr) {
 963	case IPI_RESCHEDULE:
 964		scheduler_ipi();
 965		break;
 966
 967	case IPI_CALL_FUNC:
 968		generic_smp_call_function_interrupt();
 969		break;
 970
 971	case IPI_CPU_STOP:
 972	case IPI_CPU_STOP_NMI:
 973		if (IS_ENABLED(CONFIG_KEXEC_CORE) && crash_stop) {
 974			ipi_cpu_crash_stop(cpu, get_irq_regs());
 975			unreachable();
 976		} else {
 977			local_cpu_stop(cpu);
 978		}
 979		break;
 980
 981#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 982	case IPI_TIMER:
 983		tick_receive_broadcast();
 984		break;
 985#endif
 986
 987#ifdef CONFIG_IRQ_WORK
 988	case IPI_IRQ_WORK:
 989		irq_work_run();
 990		break;
 991#endif
 992
 993	case IPI_CPU_BACKTRACE:
 994		/*
 995		 * NOTE: in some cases this _won't_ be NMI context. See the
 996		 * comment in arch_trigger_cpumask_backtrace().
 997		 */
 998		nmi_cpu_backtrace(get_irq_regs());
 999		break;
1000
1001	case IPI_KGDB_ROUNDUP:
1002		kgdb_nmicallback(cpu, get_irq_regs());
1003		break;
1004
1005	default:
1006		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
1007		break;
1008	}
1009
1010	if ((unsigned)ipinr < NR_IPI)
1011		trace_ipi_exit(ipi_types[ipinr]);
1012}
1013
1014static irqreturn_t ipi_handler(int irq, void *data)
1015{
1016	do_handle_IPI(irq - ipi_irq_base);
1017	return IRQ_HANDLED;
1018}
1019
1020static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
1021{
1022	trace_ipi_raise(target, ipi_types[ipinr]);
1023	__ipi_send_mask(ipi_desc[ipinr], target);
1024}
1025
1026static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
1027{
1028	if (!system_uses_irq_prio_masking())
1029		return false;
1030
1031	switch (ipi) {
1032	case IPI_CPU_STOP_NMI:
1033	case IPI_CPU_BACKTRACE:
1034	case IPI_KGDB_ROUNDUP:
1035		return true;
1036	default:
1037		return false;
1038	}
1039}
1040
1041static void ipi_setup(int cpu)
1042{
1043	int i;
1044
1045	if (WARN_ON_ONCE(!ipi_irq_base))
1046		return;
1047
1048	for (i = 0; i < nr_ipi; i++) {
1049		if (ipi_should_be_nmi(i)) {
1050			prepare_percpu_nmi(ipi_irq_base + i);
1051			enable_percpu_nmi(ipi_irq_base + i, 0);
1052		} else {
1053			enable_percpu_irq(ipi_irq_base + i, 0);
1054		}
1055	}
1056}
1057
1058#ifdef CONFIG_HOTPLUG_CPU
1059static void ipi_teardown(int cpu)
1060{
1061	int i;
1062
1063	if (WARN_ON_ONCE(!ipi_irq_base))
1064		return;
1065
1066	for (i = 0; i < nr_ipi; i++) {
1067		if (ipi_should_be_nmi(i)) {
1068			disable_percpu_nmi(ipi_irq_base + i);
1069			teardown_percpu_nmi(ipi_irq_base + i);
1070		} else {
1071			disable_percpu_irq(ipi_irq_base + i);
1072		}
1073	}
1074}
1075#endif
1076
1077void __init set_smp_ipi_range(int ipi_base, int n)
1078{
1079	int i;
1080
1081	WARN_ON(n < MAX_IPI);
1082	nr_ipi = min(n, MAX_IPI);
1083
1084	for (i = 0; i < nr_ipi; i++) {
1085		int err;
1086
1087		if (ipi_should_be_nmi(i)) {
1088			err = request_percpu_nmi(ipi_base + i, ipi_handler,
1089						 "IPI", &irq_stat);
1090			WARN(err, "Could not request IPI %d as NMI, err=%d\n",
1091			     i, err);
1092		} else {
1093			err = request_percpu_irq(ipi_base + i, ipi_handler,
1094						 "IPI", &irq_stat);
1095			WARN(err, "Could not request IPI %d as IRQ, err=%d\n",
1096			     i, err);
1097		}
1098
1099		ipi_desc[i] = irq_to_desc(ipi_base + i);
1100		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
1101	}
1102
1103	ipi_irq_base = ipi_base;
1104
1105	/* Setup the boot CPU immediately */
1106	ipi_setup(smp_processor_id());
1107}
1108
1109void arch_smp_send_reschedule(int cpu)
1110{
1111	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
1112}
1113
1114#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
1115void arch_send_wakeup_ipi(unsigned int cpu)
1116{
1117	/*
1118	 * We use a scheduler IPI to wake the CPU as this avoids the need for a
1119	 * dedicated IPI and we can safely handle spurious scheduler IPIs.
1120	 */
1121	smp_send_reschedule(cpu);
1122}
1123#endif
1124
1125#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
1126void tick_broadcast(const struct cpumask *mask)
1127{
1128	smp_cross_call(mask, IPI_TIMER);
1129}
1130#endif
1131
1132/*
1133 * The number of CPUs online, not counting this CPU (which may not be
1134 * fully online and so not counted in num_online_cpus()).
1135 */
1136static inline unsigned int num_other_online_cpus(void)
1137{
1138	unsigned int this_cpu_online = cpu_online(smp_processor_id());
1139
1140	return num_online_cpus() - this_cpu_online;
1141}
1142
1143void smp_send_stop(void)
1144{
1145	static unsigned long stop_in_progress;
1146	cpumask_t mask;
1147	unsigned long timeout;
1148
1149	/*
1150	 * If this cpu is the only one alive at this point in time, online or
1151	 * not, there are no stop messages to be sent around, so just back out.
1152	 */
1153	if (num_other_online_cpus() == 0)
1154		goto skip_ipi;
1155
1156	/* Only proceed if this is the first CPU to reach this code */
1157	if (test_and_set_bit(0, &stop_in_progress))
1158		return;
1159
1160	/*
1161	 * Send an IPI to all currently online CPUs except the CPU running
1162	 * this code.
1163	 *
1164	 * NOTE: we don't do anything here to prevent other CPUs from coming
1165	 * online after we snapshot `cpu_online_mask`. Ideally, the calling code
1166	 * should do something to prevent other CPUs from coming up. This code
1167	 * can be called in the panic path and thus it doesn't seem wise to
1168	 * grab the CPU hotplug mutex ourselves. Worst case:
1169	 * - If a CPU comes online as we're running, we'll likely notice it
1170	 *   during the 1 second wait below and then we'll catch it when we try
1171	 *   with an NMI (assuming NMIs are enabled) since we re-snapshot the
1172	 *   mask before sending an NMI.
1173	 * - If we leave the function and see that CPUs are still online we'll
1174	 *   at least print a warning. Especially without NMIs this function
1175	 *   isn't foolproof anyway so calling code will just have to accept
1176	 *   the fact that there could be cases where a CPU can't be stopped.
1177	 */
1178	cpumask_copy(&mask, cpu_online_mask);
1179	cpumask_clear_cpu(smp_processor_id(), &mask);
1180
1181	if (system_state <= SYSTEM_RUNNING)
1182		pr_crit("SMP: stopping secondary CPUs\n");
1183
1184	/*
1185	 * Start with a normal IPI and wait up to one second for other CPUs to
1186	 * stop. We do this first because it gives other processors a chance
1187	 * to exit critical sections / drop locks and makes the rest of the
1188	 * stop process (especially console flush) more robust.
1189	 */
1190	smp_cross_call(&mask, IPI_CPU_STOP);
1191	timeout = USEC_PER_SEC;
1192	while (num_other_online_cpus() && timeout--)
1193		udelay(1);
1194
1195	/*
1196	 * If CPUs are still online, try an NMI. There's no excuse for this to
1197	 * be slow, so we only give them an extra 10 ms to respond.
1198	 */
1199	if (num_other_online_cpus() && ipi_should_be_nmi(IPI_CPU_STOP_NMI)) {
1200		smp_rmb();
1201		cpumask_copy(&mask, cpu_online_mask);
1202		cpumask_clear_cpu(smp_processor_id(), &mask);
1203
1204		pr_info("SMP: retry stop with NMI for CPUs %*pbl\n",
1205			cpumask_pr_args(&mask));
1206
1207		smp_cross_call(&mask, IPI_CPU_STOP_NMI);
1208		timeout = USEC_PER_MSEC * 10;
1209		while (num_other_online_cpus() && timeout--)
1210			udelay(1);
1211	}
1212
1213	if (num_other_online_cpus()) {
1214		smp_rmb();
1215		cpumask_copy(&mask, cpu_online_mask);
1216		cpumask_clear_cpu(smp_processor_id(), &mask);
1217
1218		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
1219			cpumask_pr_args(&mask));
1220	}
1221
1222skip_ipi:
1223	sdei_mask_local_cpu();
1224}
1225
1226#ifdef CONFIG_KEXEC_CORE
1227void crash_smp_send_stop(void)
1228{
1229	/*
1230	 * This function can be called twice in panic path, but obviously
1231	 * we execute this only once.
1232	 *
1233	 * We use this same boolean to tell whether the IPI we send was a
1234	 * stop or a "crash stop".
1235	 */
1236	if (crash_stop)
1237		return;
1238	crash_stop = 1;
1239
1240	smp_send_stop();
1241
1242	sdei_handler_abort();
1243}
1244
1245bool smp_crash_stop_failed(void)
1246{
1247	return num_other_online_cpus() != 0;
1248}
1249#endif
1250
1251static bool have_cpu_die(void)
1252{
1253#ifdef CONFIG_HOTPLUG_CPU
1254	int any_cpu = raw_smp_processor_id();
1255	const struct cpu_operations *ops = get_cpu_ops(any_cpu);
1256
1257	if (ops && ops->cpu_die)
1258		return true;
1259#endif
1260	return false;
1261}
1262
1263bool cpus_are_stuck_in_kernel(void)
1264{
1265	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
1266
1267	return !!cpus_stuck_in_kernel || smp_spin_tables ||
1268		is_protected_kvm_enabled();
1269}