// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>
#include <linux/clockchips.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#include <asm/kup.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct task_struct *secondary_current;
bool has_big_cores;
bool coregroup_enabled;
bool thread_group_shares_l2;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);

enum {
#ifdef CONFIG_SCHED_SMT
	smt_idx,
#endif
	cache_idx,
	mc_idx,
	die_idx,
};
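
/*
 * These indices correspond, in order, to the entries of the
 * powerpc_topology[] array defined below; fixup_topology() uses them
 * to patch the sched-domain levels in place.
 */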

#define MAX_THREAD_LIST_SIZE	8
#define THREAD_GROUP_SHARE_L1	1
#define THREAD_GROUP_SHARE_L2	2
struct thread_groups {
	unsigned int property;
	unsigned int nr_groups;
	unsigned int threads_per_group;
	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
};

/* Maximum number of properties that groups of threads within a core can share */
#define MAX_THREAD_GROUP_PROPERTIES 2

struct thread_groups_list {
	unsigned int nr_properties;
	struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
};

static struct thread_groups_list tgl[NR_CPUS] __initdata;
/*
 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
 * the set of its siblings that share the L1 cache.
 */
static DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);

/*
 * On some big-core systems, thread_group_l2_cache_map for each CPU
 * corresponds to the set of its siblings within the core that share the
 * L2 cache.
 */
static DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}


#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca_ptrs[nr]->cpu_start) {
		paca_ptrs[nr]->cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	timer_broadcast_interrupt();
	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = "nmi ipi",
#endif
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
		virq, smp_ipi_name[msg], err);

	return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
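
/*
 * Each IPI message occupies one byte of the per-cpu "messages" word:
 * smp_muxed_ipi_set_message() stores 1 into byte @msg, and IPI_MESSAGE(A)
 * names the least-significant bit of that byte within the long, taking
 * endianness into account (byte 0 is the most significant byte on
 * big-endian). The demux below tests these bits after atomically
 * swapping the whole word to zero.
 */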

irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 * have returned from their handlers, so there is no guarantee about
 * concurrency or re-entrancy.
 *
 * A new NMI can be issued before all targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *) = NULL;
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
		fn = READ_ONCE(nmi_ipi_function);
		WARN_ON_ONCE(!fn);
		ret = 1;
	}
	nmi_ipi_unlock_end(&flags);

	if (fn)
		fn(regs);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   begin executing the handler, == 0 specifies indefinite delay.
 */
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
			      u64 delay_us, bool safe)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(!nmi_ipi_busy);
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_busy = true;
	nmi_ipi_function = fn;

	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}

	nmi_ipi_unlock();

	/* Interrupts remain hard disabled */

	do_smp_send_nmi_ipi(cpu, safe);

	nmi_ipi_lock();
	/* nmi_ipi_busy is set here, so unlock/lock is okay */
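	/* Each iteration below waits 1us, so delay_us is a timeout in microseconds; 0 waits forever. */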
	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		nmi_ipi_unlock();
		udelay(1);
		nmi_ipi_lock();
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}

	nmi_ipi_function = NULL;
	nmi_ipi_busy = false;

	nmi_ipi_unlock_end(&flags);

	return ret;
}

int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}

int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
static void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	int cpu;

	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
	if (kdump_in_progress() && crash_wake_offline) {
		for_each_present_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			/*
			 * crash_ipi_callback will wait for
			 * all cpus, including offline CPUs.
			 * We don't care about nmi_ipi_function.
			 * Offline cpus will jump straight into
			 * crash_ipi_callback, we can skip the
			 * entire NMI dance and waiting for
			 * cpus to clear pending mask, etc.
			 */
			do_smp_send_nmi_ipi(cpu, false);
		}
	}
}
#endif

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
	/*
	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
	 */
	set_cpu_online(smp_processor_id(), false);

	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
	hard_irq_disable();

	/*
	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings,
	 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
	 * to know other CPUs are offline before it breaks locks to flush
	 * printk buffers, in case we panic()ed while holding the lock.
	 */
	set_cpu_online(smp_processor_id(), false);

	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * Prevent waiting on csd lock from a previous smp_send_stop.
	 * This is racy, but in general callers try to do the right
	 * thing and only fire off one smp_send_stop (e.g., see
	 * kernel/panic.c)
	 */
	if (stopped)
		return;

	stopped = true;

	smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

struct task_struct *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks. So
 * rather than just passing around the cpumask we pass around a function that
 * returns that cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
	cpumask_set_cpu(i, get_cpumask(j));
	cpumask_set_cpu(j, get_cpumask(i));
}

#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
			       struct cpumask *(*get_cpumask)(int))
{
	cpumask_clear_cpu(i, get_cpumask(j));
	cpumask_clear_cpu(j, get_cpumask(i));
}
#endif

/*
 * Extends set_cpus_related. Instead of setting one CPU at a time in
 * dstmask, set srcmask in one shot. dstmask should be a superset of srcmask.
 */
static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
				struct cpumask *(*dstmask)(int))
{
	struct cpumask *mask;
	int k;

	mask = srcmask(j);
	for_each_cpu(k, srcmask(i))
		cpumask_or(dstmask(k), dstmask(k), mask);

	if (i == j)
		return;

	mask = srcmask(i);
	for_each_cpu(k, srcmask(j))
		cpumask_or(dstmask(k), dstmask(k), mask);
}
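
/*
 * For example, or_cpumasks_related(i, j, cpu_sibling_mask, cpu_l2_cache_mask)
 * folds j's sibling mask into the L2 mask of every sibling of i, and, when
 * i != j, i's sibling mask into the L2 mask of every sibling of j.
 */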

/*
 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 *                      property for the CPU device node @dn and stores
 *                      the parsed output in the thread_groups_list
 *                      structure @tglp.
 *
 * @dn: The device node of the CPU device.
 * @tglp: Pointer to a thread group list structure into which the parsed
 *      output of "ibm,thread-groups" is stored.
 *
 * ibm,thread-groups[0..N-1] array defines which group of threads in
 * the CPU-device node can be grouped together based on the property.
 *
 * This array can represent thread groupings for multiple properties.
 *
 * ibm,thread-groups[i + 0] tells us the property based on which the
 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 cache and translation
 * cache. If the value is 2, it implies that the threads in the same
 * group share the same L2 cache.
 *
 * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
 * property ibm,thread-groups[i].
 *
 * ibm,thread-groups[i+2] tells us the number of threads in each such
 * group.
 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
 *
 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 * the grouping.
 *
 * Example:
 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
 * This can be decomposed into two consecutive arrays:
 * a) [1,2,4,8,10,12,14,9,11,13,15]
 * b) [2,2,4,8,10,12,14,9,11,13,15]
 *
 * where,
 *
 * a) provides information of Property "1" being shared by "2" groups,
 *    each with "4" threads each. The "ibm,ppc-interrupt-server#s" of
 *    the first group is {8,10,12,14} and the
 *    "ibm,ppc-interrupt-server#s" of the second group is
 *    {9,11,13,15}. Property "1" is indicative of the threads in the
 *    group sharing the L1 cache, translation cache and instruction data
 *    flow.
 *
 * b) provides information of Property "2" being shared by "2" groups,
 *    each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
 *    the first group is {8,10,12,14} and the
 *    "ibm,ppc-interrupt-server#s" of the second group is
 *    {9,11,13,15}. Property "2" indicates that the threads in each
 *    group share the L2 cache.
 *
 * Returns 0 on success, -EINVAL if the property does not exist,
 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 * property data isn't large enough.
 */
static int parse_thread_groups(struct device_node *dn,
			       struct thread_groups_list *tglp)
{
	unsigned int property_idx = 0;
	u32 *thread_group_array;
	size_t total_threads;
	int ret = 0, count;
	u32 *thread_list;
	int i = 0;

	count = of_property_count_u32_elems(dn, "ibm,thread-groups");
	thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array, count);
	if (ret)
		goto out_free;

	while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
		int j;
		struct thread_groups *tg = &tglp->property_tgs[property_idx++];

		tg->property = thread_group_array[i];
		tg->nr_groups = thread_group_array[i + 1];
		tg->threads_per_group = thread_group_array[i + 2];
		total_threads = tg->nr_groups * tg->threads_per_group;

		thread_list = &thread_group_array[i + 3];

		for (j = 0; j < total_threads; j++)
			tg->thread_list[j] = thread_list[j];
		i = i + 3 + total_threads;
	}

	tglp->nr_properties = property_idx;

out_free:
	kfree(thread_group_array);
	return ret;
}

/*
 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 *                              that @cpu belongs to.
 *
 * @cpu : The logical CPU whose thread group is being searched.
 * @tg : The thread-group structure of the CPU node which @cpu belongs
 *       to.
 *
 * Returns the index to tg->thread_list that points to the start
 * of the thread_group that @cpu belongs to.
 *
 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 * tg->thread_list.
 */
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
{
	int hw_cpu_id = get_hard_smp_processor_id(cpu);
	int i, j;

	for (i = 0; i < tg->nr_groups; i++) {
		int group_start = i * tg->threads_per_group;

		for (j = 0; j < tg->threads_per_group; j++) {
			int idx = group_start + j;

			if (tg->thread_list[idx] == hw_cpu_id)
				return group_start;
		}
	}

	return -1;
}
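
/*
 * For instance, with the thread_list {8,10,12,14,9,11,13,15} from the
 * example above (nr_groups = 2, threads_per_group = 4), a CPU whose
 * hardware id is 11 resolves to group start index 4, i.e. the second group.
 */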

static struct thread_groups *__init get_thread_groups(int cpu,
						      int group_property,
						      int *err)
{
	struct device_node *dn = of_get_cpu_node(cpu, NULL);
	struct thread_groups_list *cpu_tgl = &tgl[cpu];
	struct thread_groups *tg = NULL;
	int i;
	*err = 0;

	if (!dn) {
		*err = -ENODATA;
		return NULL;
	}

	if (!cpu_tgl->nr_properties) {
		*err = parse_thread_groups(dn, cpu_tgl);
		if (*err)
			goto out;
	}

	for (i = 0; i < cpu_tgl->nr_properties; i++) {
		if (cpu_tgl->property_tgs[i].property == group_property) {
			tg = &cpu_tgl->property_tgs[i];
			break;
		}
	}

	if (!tg)
		*err = -EINVAL;
out:
	of_node_put(dn);
	return tg;
}

static int __init init_thread_group_cache_map(int cpu, int cache_property)
{
	int first_thread = cpu_first_thread_sibling(cpu);
	int i, cpu_group_start = -1, err = 0;
	struct thread_groups *tg = NULL;
	cpumask_var_t *mask = NULL;

	if (cache_property != THREAD_GROUP_SHARE_L1 &&
	    cache_property != THREAD_GROUP_SHARE_L2)
		return -EINVAL;

	tg = get_thread_groups(cpu, cache_property, &err);
	if (!tg)
		return err;

	cpu_group_start = get_cpu_thread_group_start(cpu, tg);

	if (unlikely(cpu_group_start == -1)) {
		WARN_ON_ONCE(1);
		return -ENODATA;
	}

	if (cache_property == THREAD_GROUP_SHARE_L1)
		mask = &per_cpu(thread_group_l1_cache_map, cpu);
	else if (cache_property == THREAD_GROUP_SHARE_L2)
		mask = &per_cpu(thread_group_l2_cache_map, cpu);

	zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		int i_group_start = get_cpu_thread_group_start(i, tg);

		if (unlikely(i_group_start == -1)) {
			WARN_ON_ONCE(1);
			return -ENODATA;
		}

		if (i_group_start == cpu_group_start)
			cpumask_set_cpu(i, *mask);
	}

	return 0;
}

static bool shared_caches;

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
 * since the migrated task remains cache hot. We want to take advantage of this
 * at the scheduler level so an extra topology level is required.
 */
static int powerpc_shared_cache_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it
 * returns a non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
	return per_cpu(cpu_l2_cache_map, cpu);
}

#ifdef CONFIG_SCHED_SMT
static const struct cpumask *smallcore_smt_mask(int cpu)
{
	return cpu_smallcore_mask(cpu);
}
#endif

static struct cpumask *cpu_coregroup_mask(int cpu)
{
	return per_cpu(cpu_coregroup_map, cpu);
}

static bool has_coregroup_support(void)
{
	return coregroup_enabled;
}

static const struct cpumask *cpu_mc_mask(int cpu)
{
	return cpu_coregroup_mask(cpu);
}

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_mc_mask, SD_INIT_NAME(MC) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static int __init init_big_cores(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);

		if (err)
			return err;

		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
					GFP_KERNEL,
					cpu_to_node(cpu));
	}

	has_big_cores = true;

	for_each_possible_cpu(cpu) {
		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2);

		if (err)
			return err;
	}

	thread_group_shares_l2 = true;
	pr_debug("L2 cache only shared by the threads in the small core\n");
	return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		if (has_coregroup_support())
			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
						GFP_KERNEL, cpu_to_node(cpu));

#ifdef CONFIG_NUMA
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
#endif
	}

	/* Init the cpumasks so the boot CPU is related to itself */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (has_coregroup_support())
		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));

	init_big_cores();
	if (has_big_cores) {
		cpumask_set_cpu(boot_cpuid,
				cpu_smallcore_mask(boot_cpuid));
	}

	if (cpu_to_chip_id(boot_cpuid) != -1) {
		int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);

		/*
		 * All threads of a core belong to the same core, so
		 * chip_id_lookup_table will have one entry per core.
		 * Assumption: if boot_cpuid doesn't have a chip-id, then no
		 * other CPU will have one either.
		 */
		chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
		if (chip_id_lookup_table)
			memset(chip_id_lookup_table, -1, sizeof(int) * idx);
	}

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca_ptrs[boot_cpuid]->__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = current;
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt to
	 * be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

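	/* Poll for up to ~10 seconds (100 iterations of 100ms) for the CPU to mark itself dead. */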
	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * kick_cpu() should set the cpu_state to CPU_UP_PREPARE; otherwise the
 * cpu_state stays CPU_DEAD after generic_set_cpu_dead() has run, and the
 * wait loop in generic_cpu_die() would return immediately.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
#ifdef CONFIG_PPC64
	paca_ptrs[cpu]->__current = idle;
	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	idle->cpu = cpu;
	secondary_current = current_set[cpu] = idle;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure the callin-map entry is 0 (it can be left over from
	 * a previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (i.e. is actually up).
	 * Use this value that I found through experimentation.
	 * -- Cort
	 */
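	/* Both paths allow roughly 5 seconds: 50000 * 100us at boot, 5000 * 1ms for hotplug. */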
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
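/*
 * These are plain shifts: e.g. with threads_shift = 3 (8 threads per
 * core), CPU 13 belongs to core 1 (13 >> 3), and core 1's first thread
 * is CPU 8 (1 << 3).
 */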
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	struct device_node *l2_cache, *np;
	int i;

	if (has_big_cores)
		submask_fn = cpu_smallcore_mask;

	/*
	 * If the threads in a thread-group share L2 cache, then the
	 * L2-mask can be obtained from thread_group_l2_cache_map.
	 */
	if (thread_group_shares_l2) {
		cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));

		for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
			if (cpu_online(i))
				set_cpus_related(i, cpu, cpu_l2_cache_mask);
		}

		/* Verify that L1-cache siblings are a subset of L2 cache-siblings */
		if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
		    !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
			pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
				     cpu);
		}

		return true;
	}

	l2_cache = cpu_to_l2cache(cpu);
	if (!l2_cache || !*mask) {
		/* Assume only core siblings share cache with this CPU */
		for_each_cpu(i, submask_fn(cpu))
			set_cpus_related(cpu, i, cpu_l2_cache_mask);

		return false;
	}

	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));

	/* Update l2-cache mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);

	/* Skip all CPUs already part of current CPU l2-cache mask */
	cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));

	for_each_cpu(i, *mask) {
		/*
		 * When updating the masks, the current CPU has not yet been
		 * marked online, but we still need to update the cache masks.
		 */
		np = cpu_to_l2cache(i);

		/* Skip all CPUs already part of current CPU l2-cache */
		if (np == l2_cache) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
			cpumask_andnot(*mask, *mask, submask_fn(i));
		} else {
			cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
		}

		of_node_put(np);
	}
	of_node_put(l2_cache);

	return true;
}

#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
	struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
	int i;

	if (shared_caches)
		mask_fn = cpu_l2_cache_mask;

	for_each_cpu(i, mask_fn(cpu)) {
		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
		if (has_big_cores)
			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
	}

	for_each_cpu(i, cpu_core_mask(cpu))
		set_cpus_unrelated(cpu, i, cpu_core_mask);

	if (has_coregroup_support()) {
		for_each_cpu(i, cpu_coregroup_mask(cpu))
			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
	}
}
#endif

static inline void add_cpu_to_smallcore_masks(int cpu)
{
	int i;

	if (!has_big_cores)
		return;

	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));

	for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_smallcore_mask);
	}
}

static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	int coregroup_id = cpu_to_coregroup_id(cpu);
	int i;

	if (shared_caches)
		submask_fn = cpu_l2_cache_mask;

	if (!*mask) {
		/* Assume only siblings are part of this CPU's coregroup */
		for_each_cpu(i, submask_fn(cpu))
			set_cpus_related(cpu, i, cpu_coregroup_mask);

		return;
	}

	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));

	/* Update coregroup mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);

	/* Skip all CPUs already part of coregroup mask */
	cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));

	for_each_cpu(i, *mask) {
		/* Skip all CPUs not part of this coregroup */
		if (coregroup_id == cpu_to_coregroup_id(i)) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
			cpumask_andnot(*mask, *mask, submask_fn(i));
		} else {
			cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
		}
	}
}

static void add_cpu_to_masks(int cpu)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	int first_thread = cpu_first_thread_sibling(cpu);
	cpumask_var_t mask;
	int chip_id = -1;
	bool ret;
	int i;

	/*
	 * This CPU will not be in the online mask yet, so we need to manually
	 * add it to its own thread sibling mask.
	 */
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
	cpumask_set_cpu(cpu, cpu_core_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	add_cpu_to_smallcore_masks(cpu);

	/* In CPU-hotplug path, hence use GFP_ATOMIC */
	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
	update_mask_by_l2(cpu, &mask);

	if (has_coregroup_support())
		update_coregroup_mask(cpu, &mask);

	if (chip_id_lookup_table && ret)
		chip_id = cpu_to_chip_id(cpu);

	if (shared_caches)
		submask_fn = cpu_l2_cache_mask;

	/* Update core_mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);

	/* Skip all CPUs already part of current CPU core mask */
	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));

	/* If chip_id is -1, limit the cpu_core_mask to within the DIE */
	if (chip_id == -1)
		cpumask_and(mask, mask, cpu_cpu_mask(cpu));

	for_each_cpu(i, mask) {
		if (chip_id == cpu_to_chip_id(i)) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
			cpumask_andnot(mask, mask, submask_fn(i));
		} else {
			cpumask_andnot(mask, mask, cpu_core_mask(i));
		}
	}

	free_cpumask_var(mask);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = raw_smp_processor_id();

	/* PPC64 calls setup_kup() in early_setup_secondary() */
	if (IS_ENABLED(CONFIG_PPC32))
		setup_kup();

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	rcu_cpu_starting(cpu);
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!shared_caches) {
		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
		struct cpumask *mask = cpu_l2_cache_mask(cpu);

		if (has_big_cores)
			sibling_mask = cpu_smallcore_mask;

		if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
			shared_caches = true;
	}

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	boot_init_stack_canary();

	local_irq_enable();

	/* We can enable ftrace for secondary cpus now */
	this_cpu_enable_ftrace();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void fixup_topology(void)
{
	int i;

#ifdef CONFIG_SCHED_SMT
	if (has_big_cores) {
		pr_info("Big cores detected but using small core scheduling\n");
		powerpc_topology[smt_idx].mask = smallcore_smt_mask;
	}
#endif

	if (!has_coregroup_support())
		powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;

	/*
	 * Try to consolidate topology levels here instead of
	 * allowing the scheduler to degenerate them.
	 * - Don't consolidate if masks are different.
	 * - Don't consolidate if sd_flags exists and are different.
	 */
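	/*
	 * E.g. when coregroup is unsupported, MC was aliased to CACHE above,
	 * so the loop below overwrites the MC slot with the DIE level and
	 * the level list effectively shrinks by one.
	 */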
	for (i = 1; i <= die_idx; i++) {
		if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
			continue;

		if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
		    powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
			continue;

		if (!powerpc_topology[i - 1].sd_flags)
			powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;

		powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
		powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
#ifdef CONFIG_SCHED_DEBUG
		powerpc_topology[i].name = powerpc_topology[i + 1].name;
#endif
	}
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	fixup_topology();
	set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	this_cpu_disable_ftrace();

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	remove_cpu_from_masks(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void arch_cpu_idle_dead(void)
{
	/*
	 * Disable on the down path. This will be re-enabled by
	 * start_secondary() via start_secondary_resume() below.
	 */
	this_cpu_disable_ftrace();

	if (smp_ops->cpu_offline_self)
		smp_ops->cpu_offline_self();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * SMP support for ppc.
4 *
5 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
6 * deal of code from the sparc and intel versions.
7 *
8 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
9 *
10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
12 */
13
14#undef DEBUG
15
16#include <linux/kernel.h>
17#include <linux/export.h>
18#include <linux/sched/mm.h>
19#include <linux/sched/task_stack.h>
20#include <linux/sched/topology.h>
21#include <linux/smp.h>
22#include <linux/interrupt.h>
23#include <linux/delay.h>
24#include <linux/init.h>
25#include <linux/spinlock.h>
26#include <linux/cache.h>
27#include <linux/err.h>
28#include <linux/device.h>
29#include <linux/cpu.h>
30#include <linux/notifier.h>
31#include <linux/topology.h>
32#include <linux/profile.h>
33#include <linux/processor.h>
34#include <linux/random.h>
35#include <linux/stackprotector.h>
36#include <linux/pgtable.h>
37#include <linux/clockchips.h>
38#include <linux/kexec.h>
39
40#include <asm/ptrace.h>
41#include <linux/atomic.h>
42#include <asm/irq.h>
43#include <asm/hw_irq.h>
44#include <asm/kvm_ppc.h>
45#include <asm/dbell.h>
46#include <asm/page.h>
47#include <asm/smp.h>
48#include <asm/time.h>
49#include <asm/machdep.h>
50#include <asm/mmu_context.h>
51#include <asm/cputhreads.h>
52#include <asm/cputable.h>
53#include <asm/mpic.h>
54#include <asm/vdso_datapage.h>
55#ifdef CONFIG_PPC64
56#include <asm/paca.h>
57#endif
58#include <asm/vdso.h>
59#include <asm/debug.h>
60#include <asm/cpu_has_feature.h>
61#include <asm/ftrace.h>
62#include <asm/kup.h>
63#include <asm/fadump.h>
64#include <asm/systemcfg.h>
65
66#include <trace/events/ipi.h>
67
68#ifdef DEBUG
69#include <asm/udbg.h>
70#define DBG(fmt...) udbg_printf(fmt)
71#else
72#define DBG(fmt...)
73#endif
74
75#ifdef CONFIG_HOTPLUG_CPU
76/* State of each CPU during hotplug phases */
77static DEFINE_PER_CPU(int, cpu_state) = { 0 };
78#endif
79
80struct task_struct *secondary_current;
81bool has_big_cores __ro_after_init;
82bool coregroup_enabled __ro_after_init;
83bool thread_group_shares_l2 __ro_after_init;
84bool thread_group_shares_l3 __ro_after_init;
85
86DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
87DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
88DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
89DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
90static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
91
92EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
93EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
94EXPORT_PER_CPU_SYMBOL(cpu_core_map);
95EXPORT_SYMBOL_GPL(has_big_cores);
96
97#define MAX_THREAD_LIST_SIZE 8
98#define THREAD_GROUP_SHARE_L1 1
99#define THREAD_GROUP_SHARE_L2_L3 2
100struct thread_groups {
101 unsigned int property;
102 unsigned int nr_groups;
103 unsigned int threads_per_group;
104 unsigned int thread_list[MAX_THREAD_LIST_SIZE];
105};
106
107/* Maximum number of properties that groups of threads within a core can share */
108#define MAX_THREAD_GROUP_PROPERTIES 2
109
110struct thread_groups_list {
111 unsigned int nr_properties;
112 struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
113};
114
115static struct thread_groups_list tgl[NR_CPUS] __initdata;
116/*
117 * On big-cores system, thread_group_l1_cache_map for each CPU corresponds to
118 * the set its siblings that share the L1-cache.
119 */
120DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
121
122/*
123 * On some big-cores system, thread_group_l2_cache_map for each CPU
124 * corresponds to the set its siblings within the core that share the
125 * L2-cache.
126 */
127DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
128
129/*
130 * On P10, thread_group_l3_cache_map for each CPU is equal to the
131 * thread_group_l2_cache_map
132 */
133DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
134
135/* SMP operations for this machine */
136struct smp_ops_t *smp_ops;
137
138/* Can't be static due to PowerMac hackery */
139volatile unsigned int cpu_callin_map[NR_CPUS];
140
141int smt_enabled_at_boot = 1;
142
143/*
144 * Returns 1 if the specified cpu should be brought up during boot.
145 * Used to inhibit booting threads if they've been disabled or
146 * limited on the command line
147 */
148int smp_generic_cpu_bootable(unsigned int nr)
149{
150 /* Special case - we inhibit secondary thread startup
151 * during boot if the user requests it.
152 */
153 if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
154 if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
155 return 0;
156 if (smt_enabled_at_boot
157 && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
158 return 0;
159 }
160
161 return 1;
162}
163
164
165#ifdef CONFIG_PPC64
166int smp_generic_kick_cpu(int nr)
167{
168 if (nr < 0 || nr >= nr_cpu_ids)
169 return -EINVAL;
170
171 /*
172 * The processor is currently spinning, waiting for the
173 * cpu_start field to become non-zero After we set cpu_start,
174 * the processor will continue on to secondary_start
175 */
176 if (!paca_ptrs[nr]->cpu_start) {
177 paca_ptrs[nr]->cpu_start = 1;
178 smp_mb();
179 return 0;
180 }
181
182#ifdef CONFIG_HOTPLUG_CPU
183 /*
184 * Ok it's not there, so it might be soft-unplugged, let's
185 * try to bring it back
186 */
187 generic_set_cpu_up(nr);
188 smp_wmb();
189 smp_send_reschedule(nr);
190#endif /* CONFIG_HOTPLUG_CPU */
191
192 return 0;
193}
194#endif /* CONFIG_PPC64 */
195
196static irqreturn_t call_function_action(int irq, void *data)
197{
198 generic_smp_call_function_interrupt();
199 return IRQ_HANDLED;
200}
201
202static irqreturn_t reschedule_action(int irq, void *data)
203{
204 scheduler_ipi();
205 return IRQ_HANDLED;
206}
207
208#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
209static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
210{
211 timer_broadcast_interrupt();
212 return IRQ_HANDLED;
213}
214#endif
215
216#ifdef CONFIG_NMI_IPI
217static irqreturn_t nmi_ipi_action(int irq, void *data)
218{
219 smp_handle_nmi_ipi(get_irq_regs());
220 return IRQ_HANDLED;
221}
222#endif
223
224static irq_handler_t smp_ipi_action[] = {
225 [PPC_MSG_CALL_FUNCTION] = call_function_action,
226 [PPC_MSG_RESCHEDULE] = reschedule_action,
227#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
228 [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
229#endif
230#ifdef CONFIG_NMI_IPI
231 [PPC_MSG_NMI_IPI] = nmi_ipi_action,
232#endif
233};
234
235/*
236 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
237 * than going through the call function infrastructure, and strongly
238 * serialized, so it is more appropriate for debugging.
239 */
240const char *smp_ipi_name[] = {
241 [PPC_MSG_CALL_FUNCTION] = "ipi call function",
242 [PPC_MSG_RESCHEDULE] = "ipi reschedule",
243#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
244 [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
245#endif
246#ifdef CONFIG_NMI_IPI
247 [PPC_MSG_NMI_IPI] = "nmi ipi",
248#endif
249};
250
251/* optional function to request ipi, for controllers with >= 4 ipis */
252int smp_request_message_ipi(int virq, int msg)
253{
254 int err;
255
256 if (msg < 0 || msg > PPC_MSG_NMI_IPI)
257 return -EINVAL;
258#ifndef CONFIG_NMI_IPI
259 if (msg == PPC_MSG_NMI_IPI)
260 return 1;
261#endif
262
263 err = request_irq(virq, smp_ipi_action[msg],
264 IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
265 smp_ipi_name[msg], NULL);
266 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
267 virq, smp_ipi_name[msg], err);
268
269 return err;
270}
271
272#ifdef CONFIG_PPC_SMP_MUXED_IPI
273struct cpu_messages {
274 long messages; /* current messages */
275};
276static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
277
278void smp_muxed_ipi_set_message(int cpu, int msg)
279{
280 struct cpu_messages *info = &per_cpu(ipi_message, cpu);
281 char *message = (char *)&info->messages;
282
283 /*
284 * Order previous accesses before accesses in the IPI handler.
285 */
286 smp_mb();
287 WRITE_ONCE(message[msg], 1);
288}
289
290void smp_muxed_ipi_message_pass(int cpu, int msg)
291{
292 smp_muxed_ipi_set_message(cpu, msg);
293
294 /*
295 * cause_ipi functions are required to include a full barrier
296 * before doing whatever causes the IPI.
297 */
298 smp_ops->cause_ipi(cpu);
299}
300
301#ifdef __BIG_ENDIAN__
302#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
303#else
304#define IPI_MESSAGE(A) (1uL << (8 * (A)))
305#endif

irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (READ_ONCE(info->messages));

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void arch_smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so they should not be used as an
 * ongoing part of a running system. They can be used for crash, debug,
 * halt/reboot, etc.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 * have returned from their handlers, so there is no guarantee about
 * concurrency or re-entrancy.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and to ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

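/*
 * Note: __nmi_ipi_lock is an open-coded atomic rather than a spinlock,
 * and the helpers below are noinstr, so they are safe to use from the
 * NMI path without lockdep, tracing or other instrumentation hooks.
 */
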
noinstr static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

noinstr static void nmi_ipi_lock(void)
{
	while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
}

noinstr static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(raw_atomic_read(&__nmi_ipi_lock) != 1);
	raw_atomic_set(&__nmi_ipi_lock, 0);
}

noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *) = NULL;
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
		fn = READ_ONCE(nmi_ipi_function);
		WARN_ON_ONCE(!fn);
		ret = 1;
	}
	nmi_ipi_unlock_end(&flags);

	if (fn)
		fn(regs);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}
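
/*
 * If the platform cannot (or, via @safe, was asked not to) raise a true
 * system-reset NMI through smp_ops->cause_nmi_ipi(), the code above
 * falls back to the muxed PPC_MSG_NMI_IPI message, which is only as
 * non-maskable as the ordinary IPI path.
 */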

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   begin executing the handler, == 0 specifies indefinite delay.
 */
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
			      u64 delay_us, bool safe)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(!nmi_ipi_busy);
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_busy = true;
	nmi_ipi_function = fn;

	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}

	nmi_ipi_unlock();

	/* Interrupts remain hard disabled */

	do_smp_send_nmi_ipi(cpu, safe);

	nmi_ipi_lock();
	/* nmi_ipi_busy is set here, so unlock/lock is okay */
	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		nmi_ipi_unlock();
		udelay(1);
		nmi_ipi_lock();
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}

	nmi_ipi_function = NULL;
	nmi_ipi_busy = false;

	nmi_ipi_unlock_end(&flags);

	return ret;
}

int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}

int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
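
/*
 * Illustrative usage (the callback name is hypothetical): send a debug
 * NMI to every other CPU and give each up to 1ms to enter the handler:
 *
 *	static void dump_regs_cb(struct pt_regs *regs)
 *	{
 *		show_regs(regs);
 *	}
 *	...
 *	if (!smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, dump_regs_cb, 1000))
 *		pr_warn("not all CPUs entered the NMI handler\n");
 */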
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
static void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_CRASH_DUMP
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	int cpu;

	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
	if (kdump_in_progress() && crash_wake_offline) {
		for_each_present_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			/*
			 * crash_ipi_callback will wait for all cpus,
			 * including offline CPUs. We don't care about
			 * nmi_ipi_function. Offline cpus will jump straight
			 * into crash_ipi_callback, so we can skip the entire
			 * NMI dance and the wait for CPUs to clear the
			 * pending mask, etc.
			 */
			do_smp_send_nmi_ipi(cpu, false);
		}
	}
}
#endif

void crash_smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * In case of fadump, register data for all CPUs is captured by f/w
	 * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before
	 * this rtas call to avoid tricky post processing of those CPUs'
	 * backtraces.
	 */
	if (should_fadump_crash())
		return;

	if (stopped)
		return;

	stopped = true;

#ifdef CONFIG_CRASH_DUMP
	if (kexec_crash_image) {
		crash_kexec_prepare();
		return;
	}
#endif

	smp_send_stop();
}

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
	/*
	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
	 */
	set_cpu_online(smp_processor_id(), false);

	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
	hard_irq_disable();

	/*
	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings
	 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
	 * to know other CPUs are offline before it breaks locks to flush
	 * printk buffers, in case we panic()ed while holding the lock.
	 */
	set_cpu_online(smp_processor_id(), false);

	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * Prevent waiting on csd lock from a previous smp_send_stop.
	 * This is racy, but in general callers try to do the right
	 * thing and only fire off one smp_send_stop (e.g., see
	 * kernel/panic.c)
	 */
	if (stopped)
		return;

	stopped = true;

	smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

static struct task_struct *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_E500
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks,
 * so rather than just passing around the cpumask we pass around a function
 * that returns that cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
	cpumask_set_cpu(i, get_cpumask(j));
	cpumask_set_cpu(j, get_cpumask(i));
}

#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
		struct cpumask *(*get_cpumask)(int))
{
	cpumask_clear_cpu(i, get_cpumask(j));
	cpumask_clear_cpu(j, get_cpumask(i));
}
#endif

/*
 * Extends set_cpus_related. Instead of setting one CPU at a time in
 * dstmask, ORs in the whole srcmask in one shot. dstmask should be a
 * superset of srcmask.
 */
static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
				struct cpumask *(*dstmask)(int))
{
	struct cpumask *mask;
	int k;

	mask = srcmask(j);
	for_each_cpu(k, srcmask(i))
		cpumask_or(dstmask(k), dstmask(k), mask);

	if (i == j)
		return;

	mask = srcmask(i);
	for_each_cpu(k, srcmask(j))
		cpumask_or(dstmask(k), dstmask(k), mask);
}
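
/*
 * For example, or_cpumasks_related(4, 8, cpu_sibling_mask, cpu_core_mask)
 * ORs CPU 8's sibling mask into the core mask of every sibling of CPU 4,
 * and then CPU 4's sibling mask into the core mask of every sibling of
 * CPU 8, rather than calling set_cpus_related() once per CPU pair.
 */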

/*
 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 *                      property for the CPU device node @dn and stores
 *                      the parsed output in the thread_groups_list
 *                      structure @tglp.
 *
 * @dn: The device node of the CPU device.
 * @tglp: Pointer to a thread group list structure into which the parsed
 *      output of "ibm,thread-groups" is stored.
 *
 * ibm,thread-groups[0..N-1] array defines which group of threads in
 * the CPU-device node can be grouped together based on the property.
 *
 * This array can represent thread groupings for multiple properties.
 *
 * ibm,thread-groups[i + 0] tells us the property based on which the
 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 and translation
 * caches. If the value is 2, it implies that the threads in the same
 * group share the same L2 cache.
 *
 * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
 * property ibm,thread-groups[i]
 *
 * ibm,thread-groups[i+2] tells us the number of threads in each such
 * group.
 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
 *
 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 * the grouping.
 *
 * Example:
 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
 * This can be decomposed into two consecutive arrays:
 * a) [1,2,4,8,10,12,14,9,11,13,15]
 * b) [2,2,4,8,10,12,14,9,11,13,15]
 *
 * wherein,
 *
 * a) provides information of Property "1" being shared by "2" groups,
 *    each with "4" threads. The "ibm,ppc-interrupt-server#s" of
 *    the first group is {8,10,12,14} and the
 *    "ibm,ppc-interrupt-server#s" of the second group is
 *    {9,11,13,15}. Property "1" is indicative of the threads in the
 *    group sharing L1 cache, translation cache and instruction data
 *    flow.
 *
 * b) provides information of Property "2" being shared by "2" groups,
 *    each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
 *    the first group is {8,10,12,14} and the
 *    "ibm,ppc-interrupt-server#s" of the second group is
 *    {9,11,13,15}. Property "2" indicates that the threads in each
 *    group share the L2-cache.
 *
 * Returns 0 on success, -EINVAL if the property does not exist,
 * -ENODATA if property does not have a value, -EOVERFLOW if the
 * property data isn't large enough, and -ENOMEM if memory allocation
 * fails.
 */
static int parse_thread_groups(struct device_node *dn,
			       struct thread_groups_list *tglp)
{
	unsigned int property_idx = 0;
	u32 *thread_group_array;
	size_t total_threads;
	int ret = 0, count;
	u32 *thread_list;
	int i = 0;

	count = of_property_count_u32_elems(dn, "ibm,thread-groups");
	if (count < 0)
		return count;

	thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
	if (!thread_group_array)
		return -ENOMEM;

	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array, count);
	if (ret)
		goto out_free;

	while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
		int j;
		struct thread_groups *tg = &tglp->property_tgs[property_idx++];

		tg->property = thread_group_array[i];
		tg->nr_groups = thread_group_array[i + 1];
		tg->threads_per_group = thread_group_array[i + 2];
		total_threads = tg->nr_groups * tg->threads_per_group;

		thread_list = &thread_group_array[i + 3];

		for (j = 0; j < total_threads; j++)
			tg->thread_list[j] = thread_list[j];
		i = i + 3 + total_threads;
	}

	tglp->nr_properties = property_idx;

out_free:
	kfree(thread_group_array);
	return ret;
}

/*
 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 *                              that @cpu belongs to.
 *
 * @cpu : The logical CPU whose thread group is being searched.
 * @tg : The thread-group structure of the CPU node which @cpu belongs
 *       to.
 *
 * Returns the index to tg->thread_list that points to the start
 * of the thread_group that @cpu belongs to.
 *
 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 * tg->thread_list.
 */
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
{
	int hw_cpu_id = get_hard_smp_processor_id(cpu);
	int i, j;

	for (i = 0; i < tg->nr_groups; i++) {
		int group_start = i * tg->threads_per_group;

		for (j = 0; j < tg->threads_per_group; j++) {
			int idx = group_start + j;

			if (tg->thread_list[idx] == hw_cpu_id)
				return group_start;
		}
	}

	return -1;
}
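
/*
 * Worked example: with tg->threads_per_group == 4 and tg->thread_list ==
 * {8,10,12,14,9,11,13,15} (from the "ibm,thread-groups" example above),
 * a CPU whose hardware id is 11 is found at index 5, so the function
 * returns group_start == 4, the start of the second group.
 */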

static struct thread_groups *__init get_thread_groups(int cpu,
						      int group_property,
						      int *err)
{
	struct device_node *dn = of_get_cpu_node(cpu, NULL);
	struct thread_groups_list *cpu_tgl = &tgl[cpu];
	struct thread_groups *tg = NULL;
	int i;

	*err = 0;

	if (!dn) {
		*err = -ENODATA;
		return NULL;
	}

	if (!cpu_tgl->nr_properties) {
		*err = parse_thread_groups(dn, cpu_tgl);
		if (*err)
			goto out;
	}

	for (i = 0; i < cpu_tgl->nr_properties; i++) {
		if (cpu_tgl->property_tgs[i].property == group_property) {
			tg = &cpu_tgl->property_tgs[i];
			break;
		}
	}

	if (!tg)
		*err = -EINVAL;
out:
	of_node_put(dn);
	return tg;
}

static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg,
					       int cpu, int cpu_group_start)
{
	int first_thread = cpu_first_thread_sibling(cpu);
	int i;

	zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		int i_group_start = get_cpu_thread_group_start(i, tg);

		if (unlikely(i_group_start == -1)) {
			WARN_ON_ONCE(1);
			return -ENODATA;
		}

		if (i_group_start == cpu_group_start)
			cpumask_set_cpu(i, *mask);
	}

	return 0;
}

static int __init init_thread_group_cache_map(int cpu, int cache_property)
{
	int cpu_group_start = -1, err = 0;
	struct thread_groups *tg = NULL;
	cpumask_var_t *mask = NULL;

	if (cache_property != THREAD_GROUP_SHARE_L1 &&
	    cache_property != THREAD_GROUP_SHARE_L2_L3)
		return -EINVAL;

	tg = get_thread_groups(cpu, cache_property, &err);

	if (!tg)
		return err;

	cpu_group_start = get_cpu_thread_group_start(cpu, tg);

	if (unlikely(cpu_group_start == -1)) {
		WARN_ON_ONCE(1);
		return -ENODATA;
	}

	if (cache_property == THREAD_GROUP_SHARE_L1) {
		mask = &per_cpu(thread_group_l1_cache_map, cpu);
		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
	} else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
		mask = &per_cpu(thread_group_l2_cache_map, cpu);
		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
		mask = &per_cpu(thread_group_l3_cache_map, cpu);
		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
	}

	return 0;
}

static bool shared_caches __ro_after_init;

#ifdef CONFIG_SCHED_SMT
/* Scheduler topology flags for the SMT sibling level */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

/*
 * On shared processor LPARs scheduled on a big core (which has two or more
 * independent thread groups per core), prefer lower numbered CPUs, so
 * that workload consolidates onto fewer cores.
 */
static __ro_after_init DEFINE_STATIC_KEY_FALSE(splpar_asym_pack);

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
 * since the migrated task remains cache hot. We want to take advantage of this
 * at the scheduler level so an extra topology level is required.
 */
static int powerpc_shared_cache_flags(void)
{
	if (static_branch_unlikely(&splpar_asym_pack))
		return SD_SHARE_LLC | SD_ASYM_PACKING;

	return SD_SHARE_LLC;
}

static int powerpc_shared_proc_flags(void)
{
	if (static_branch_unlikely(&splpar_asym_pack))
		return SD_ASYM_PACKING;

	return 0;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it returns a
 * non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
	return per_cpu(cpu_l2_cache_map, cpu);
}

#ifdef CONFIG_SCHED_SMT
static const struct cpumask *smallcore_smt_mask(int cpu)
{
	return cpu_smallcore_mask(cpu);
}
#endif

static struct cpumask *cpu_coregroup_mask(int cpu)
{
	return per_cpu(cpu_coregroup_map, cpu);
}

static bool has_coregroup_support(void)
{
	/* Coregroup identification not available on shared systems */
	if (is_shared_processor())
		return false;

	return coregroup_enabled;
}

static const struct cpumask *cpu_mc_mask(int cpu)
{
	return cpu_coregroup_mask(cpu);
}

static int __init init_big_cores(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);

		if (err)
			return err;

		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
					GFP_KERNEL,
					cpu_to_node(cpu));
	}

	has_big_cores = true;

	for_each_possible_cpu(cpu) {
		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);

		if (err)
			return err;
	}

	thread_group_shares_l2 = true;
	thread_group_shares_l3 = true;
	pr_debug("L2/L3 cache only shared by the threads in the small core\n");

	return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu, num_threads;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but lets be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		if (has_coregroup_support())
			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
						GFP_KERNEL, cpu_to_node(cpu));

#ifdef CONFIG_NUMA
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
#endif
	}

	/* Init the cpumasks so the boot CPU is related to itself */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (has_coregroup_support())
		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));

	init_big_cores();
	if (has_big_cores) {
		cpumask_set_cpu(boot_cpuid,
				cpu_smallcore_mask(boot_cpuid));
	}

	if (cpu_to_chip_id(boot_cpuid) != -1) {
		int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);

		/*
		 * All threads of a core belong to the same core, so
		 * chip_id_lookup_table will have one entry per core.
		 * Assumption: if boot_cpuid doesn't have a chip-id, then
		 * no other CPU will have one either.
		 */
		chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
		if (chip_id_lookup_table)
			memset(chip_id_lookup_table, -1, sizeof(int) * idx);
	}

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();

	// Initialise the generic SMT topology support
	num_threads = 1;
	if (smt_enabled_at_boot)
		num_threads = smt_enabled_at_boot;
	cpu_smt_set_num_threads(num_threads, threads_per_core);
}

void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca_ptrs[boot_cpuid]->__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = current;
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64_PROC_SYSTEMCFG
	systemcfg->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt to
	 * be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * cpu_state stays CPU_DEAD after generic_set_cpu_dead() has run once,
 * and generic_cpu_die() would return immediately without actually
 * waiting for the CPU to die.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()	0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
#ifdef CONFIG_PPC64
	paca_ptrs[cpu]->__current = idle;
	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
				 THREAD_SIZE - STACK_FRAME_MIN_SIZE;
#endif
	task_thread_info(idle)->cpu = cpu;
	secondary_current = current_set[cpu] = idle;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	const unsigned long boot_spin_ms = 5 * MSEC_PER_SEC;
	const bool booting = system_state < SYSTEM_RUNNING;
	const unsigned long hp_spin_ms = 1;
	unsigned long deadline;
	int rc;
	const unsigned long spin_wait_ms = booting ? boot_spin_ms : hp_spin_ms;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/*
	 * Make sure callin-map entry is 0 (can be left over from a
	 * previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/*
	 * The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * At boot time, simply spin on the callin word until the
	 * deadline passes.
	 *
	 * At run time, spin for an optimistic amount of time to avoid
	 * sleeping in the common case.
	 */
	deadline = jiffies + msecs_to_jiffies(spin_wait_ms);
	spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline));

	if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) {
		const unsigned long sleep_interval_us = 10 * USEC_PER_MSEC;
		const unsigned long sleep_wait_ms = 100 * MSEC_PER_SEC;

		deadline = jiffies + msecs_to_jiffies(sleep_wait_ms);
		while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline))
			fsleep(sleep_interval_us);
	}

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	id = of_get_cpu_hwid(np, 0);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
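
/*
 * E.g. with threads_per_core == 8 (threads_shift == 3), CPU 19 belongs
 * to core 2 (19 >> 3 == 2) and the first thread of core 2 is CPU 16
 * (2 << 3 == 16).
 */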

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	struct device_node *l2_cache, *np;
	int i;

	if (has_big_cores)
		submask_fn = cpu_smallcore_mask;

	/*
	 * If the threads in a thread-group share L2 cache, then the
	 * L2-mask can be obtained from thread_group_l2_cache_map.
	 */
	if (thread_group_shares_l2) {
		cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));

		for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
			if (cpu_online(i))
				set_cpus_related(i, cpu, cpu_l2_cache_mask);
		}

		/* Verify that L1-cache siblings are a subset of L2 cache-siblings */
		if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
		    !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
			pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
				     cpu);
		}

		return true;
	}

	l2_cache = cpu_to_l2cache(cpu);
	if (!l2_cache || !*mask) {
		/* Assume only core siblings share cache with this CPU */
		for_each_cpu(i, cpu_sibling_mask(cpu))
			set_cpus_related(cpu, i, cpu_l2_cache_mask);

		return false;
	}

	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));

	/* Update l2-cache mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);

	/* Skip all CPUs already part of current CPU l2-cache mask */
	cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));

	for_each_cpu(i, *mask) {
		/*
		 * While updating these masks the current CPU has not yet
		 * been marked online, but we still need to update its
		 * cache masks.
		 */
		np = cpu_to_l2cache(i);

		/* Skip all CPUs already part of current CPU l2-cache */
		if (np == l2_cache) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
			cpumask_andnot(*mask, *mask, submask_fn(i));
		} else {
			cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
		}

		of_node_put(np);
	}
	of_node_put(l2_cache);

	return true;
}

#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
	struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
	int i;

	unmap_cpu_from_node(cpu);

	if (shared_caches)
		mask_fn = cpu_l2_cache_mask;

	for_each_cpu(i, mask_fn(cpu)) {
		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
		if (has_big_cores)
			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
	}

	for_each_cpu(i, cpu_core_mask(cpu))
		set_cpus_unrelated(cpu, i, cpu_core_mask);

	if (has_coregroup_support()) {
		for_each_cpu(i, cpu_coregroup_mask(cpu))
			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
	}
}
#endif

static inline void add_cpu_to_smallcore_masks(int cpu)
{
	int i;

	if (!has_big_cores)
		return;

	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));

	for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_smallcore_mask);
	}
}

static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	int coregroup_id = cpu_to_coregroup_id(cpu);
	int i;

	if (shared_caches)
		submask_fn = cpu_l2_cache_mask;

	if (!*mask) {
		/* Assume only siblings are part of this CPU's coregroup */
		for_each_cpu(i, submask_fn(cpu))
			set_cpus_related(cpu, i, cpu_coregroup_mask);

		return;
	}

	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));

	/* Update coregroup mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);

	/* Skip all CPUs already part of coregroup mask */
	cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));

	for_each_cpu(i, *mask) {
		/* Skip all CPUs not part of this coregroup */
		if (coregroup_id == cpu_to_coregroup_id(i)) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
			cpumask_andnot(*mask, *mask, submask_fn(i));
		} else {
			cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
		}
	}
}

static void add_cpu_to_masks(int cpu)
{
	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
	int first_thread = cpu_first_thread_sibling(cpu);
	cpumask_var_t mask;
	int chip_id = -1;
	bool ret;
	int i;

	/*
	 * This CPU will not be in the online mask yet, so we need to
	 * manually add it to its own thread sibling mask.
	 */
	map_cpu_to_node(cpu, cpu_to_node(cpu));
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
	cpumask_set_cpu(cpu, cpu_core_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	add_cpu_to_smallcore_masks(cpu);

	/* In CPU-hotplug path, hence use GFP_ATOMIC */
	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
	update_mask_by_l2(cpu, &mask);

	if (has_coregroup_support())
		update_coregroup_mask(cpu, &mask);

	if (chip_id_lookup_table && ret)
		chip_id = cpu_to_chip_id(cpu);

	if (shared_caches)
		submask_fn = cpu_l2_cache_mask;

	/* Update core_mask with all the CPUs that are part of submask */
	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);

	/* Skip all CPUs already part of current CPU core mask */
	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));

	/* If chip_id is -1, limit the cpu_core_mask to within PKG */
	if (chip_id == -1)
		cpumask_and(mask, mask, cpu_cpu_mask(cpu));

	for_each_cpu(i, mask) {
		if (chip_id == cpu_to_chip_id(i)) {
			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
			cpumask_andnot(mask, mask, submask_fn(i));
		} else {
			cpumask_andnot(mask, mask, cpu_core_mask(i));
		}
	}

	free_cpumask_var(mask);
}

/* Activate a secondary processor. */
__no_stack_protector
void start_secondary(void *unused)
{
	unsigned int cpu = raw_smp_processor_id();

	/* PPC64 calls setup_kup() in early_setup_secondary() */
	if (IS_ENABLED(CONFIG_PPC32))
		setup_kup();

	mmgrab_lazy_tlb(&init_mm);
	current->active_mm = &init_mm;
	VM_WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(&init_mm)));
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	inc_mm_active_cpus(&init_mm);

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	rcutree_report_cpu_starting(cpu);
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64_PROC_SYSTEMCFG
	if (system_state == SYSTEM_RUNNING)
		systemcfg->processorCount++;
#endif

#ifdef CONFIG_PPC64
	vdso_getcpu_init();
#endif
	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!shared_caches) {
		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
		struct cpumask *mask = cpu_l2_cache_mask(cpu);

		if (has_big_cores)
			sibling_mask = cpu_smallcore_mask;

		if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
			shared_caches = true;
	}

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	boot_init_stack_canary();

	local_irq_enable();

	/* We can enable ftrace for secondary cpus now */
	this_cpu_enable_ftrace();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

static struct sched_domain_topology_level powerpc_topology[6];

static void __init build_sched_topology(void)
{
	int i = 0;

	if (is_shared_processor() && has_big_cores)
		static_branch_enable(&splpar_asym_pack);

#ifdef CONFIG_SCHED_SMT
	if (has_big_cores) {
		pr_info("Big cores detected but using small core scheduling\n");
		powerpc_topology[i++] = (struct sched_domain_topology_level){
			smallcore_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
		};
	} else {
		powerpc_topology[i++] = (struct sched_domain_topology_level){
			cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
		};
	}
#endif
	if (shared_caches) {
		powerpc_topology[i++] = (struct sched_domain_topology_level){
			shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE)
		};
	}
	if (has_coregroup_support()) {
		powerpc_topology[i++] = (struct sched_domain_topology_level){
			cpu_mc_mask, powerpc_shared_proc_flags, SD_INIT_NAME(MC)
		};
	}
	powerpc_topology[i++] = (struct sched_domain_topology_level){
		cpu_cpu_mask, powerpc_shared_proc_flags, SD_INIT_NAME(PKG)
	};

	/* There must be one trailing NULL entry left. */
	BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1);

	set_sched_topology(powerpc_topology);
}
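
/*
 * On a system with big cores, shared caches and coregroup support the
 * hierarchy built above is, from smallest to largest: SMT (small-core
 * siblings), CACHE (L2), MC (coregroup), PKG, followed by the trailing
 * NULL entry that terminates the topology array.
 */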

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();
	build_sched_topology();
}

/*
 * For asym packing, by default the lower numbered CPU has higher priority.
 * On shared processors, pack to the lower numbered core. However, avoid
 * moving between thread_groups within the same core.
 */
int arch_asym_cpu_priority(int cpu)
{
	if (static_branch_unlikely(&splpar_asym_pack))
		return -cpu / threads_per_core;

	return -cpu;
}
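
/*
 * E.g. with threads_per_core == 8 and splpar_asym_pack enabled, CPUs
 * 0-7 all report priority 0 and CPUs 8-15 report -1 (C integer division
 * truncates toward zero), so the scheduler packs onto core 0 but has no
 * preference among the thread groups within a core.
 */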

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	this_cpu_disable_ftrace();

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	remove_cpu_from_masks(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/*
	 * This could perhaps be a generic call in idle_task_dead(), but
	 * that requires testing from all archs, so keep it here for now.
	 */
	VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(&init_mm)));
	dec_mm_active_cpus(&init_mm);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));

	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void __noreturn arch_cpu_idle_dead(void)
{
	/*
	 * Disable ftrace on the down path. It is re-enabled by
	 * start_secondary() if we come back via start_secondary_resume()
	 * below.
	 */
	this_cpu_disable_ftrace();

	if (smp_ops->cpu_offline_self)
		smp_ops->cpu_offline_self();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif