1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * SMP support for ppc.
4 *
5 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
6 * deal of code from the sparc and intel versions.
7 *
8 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
9 *
10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
12 */
13
14#undef DEBUG
15
16#include <linux/kernel.h>
17#include <linux/export.h>
18#include <linux/sched/mm.h>
19#include <linux/sched/task_stack.h>
20#include <linux/sched/topology.h>
21#include <linux/smp.h>
22#include <linux/interrupt.h>
23#include <linux/delay.h>
24#include <linux/init.h>
25#include <linux/spinlock.h>
26#include <linux/cache.h>
27#include <linux/err.h>
28#include <linux/device.h>
29#include <linux/cpu.h>
30#include <linux/notifier.h>
31#include <linux/topology.h>
32#include <linux/profile.h>
33#include <linux/processor.h>
34#include <linux/random.h>
35#include <linux/stackprotector.h>
36#include <linux/pgtable.h>
37
38#include <asm/ptrace.h>
39#include <linux/atomic.h>
40#include <asm/irq.h>
41#include <asm/hw_irq.h>
42#include <asm/kvm_ppc.h>
43#include <asm/dbell.h>
44#include <asm/page.h>
45#include <asm/prom.h>
46#include <asm/smp.h>
47#include <asm/time.h>
48#include <asm/machdep.h>
49#include <asm/cputhreads.h>
50#include <asm/cputable.h>
51#include <asm/mpic.h>
52#include <asm/vdso_datapage.h>
53#ifdef CONFIG_PPC64
54#include <asm/paca.h>
55#endif
56#include <asm/vdso.h>
57#include <asm/debug.h>
58#include <asm/kexec.h>
59#include <asm/asm-prototypes.h>
60#include <asm/cpu_has_feature.h>
61#include <asm/ftrace.h>
62#include <asm/kup.h>
63
64#ifdef DEBUG
65#include <asm/udbg.h>
66#define DBG(fmt...) udbg_printf(fmt)
67#else
68#define DBG(fmt...)
69#endif
70
71#ifdef CONFIG_HOTPLUG_CPU
72/* State of each CPU during hotplug phases */
73static DEFINE_PER_CPU(int, cpu_state) = { 0 };
74#endif
75
76struct task_struct *secondary_current;
77bool has_big_cores;
78
79DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
80DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
81DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
82DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
83
84EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
85EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
86EXPORT_PER_CPU_SYMBOL(cpu_core_map);
87EXPORT_SYMBOL_GPL(has_big_cores);
88
89#define MAX_THREAD_LIST_SIZE 8
90#define THREAD_GROUP_SHARE_L1 1
91struct thread_groups {
92 unsigned int property;
93 unsigned int nr_groups;
94 unsigned int threads_per_group;
95 unsigned int thread_list[MAX_THREAD_LIST_SIZE];
96};
97
98/*
99 * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
100 * the set of its siblings that share the L1 cache.
101 */
102DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);
103
104/* SMP operations for this machine */
105struct smp_ops_t *smp_ops;
106
107/* Can't be static due to PowerMac hackery */
108volatile unsigned int cpu_callin_map[NR_CPUS];
109
110int smt_enabled_at_boot = 1;
111
112/*
113 * Returns 1 if the specified cpu should be brought up during boot.
114 * Used to inhibit booting threads if they've been disabled or
115 * limited on the command line.
116 */
117int smp_generic_cpu_bootable(unsigned int nr)
118{
119 /* Special case - we inhibit secondary thread startup
120 * during boot if the user requests it.
121 */
122 if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
123 if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
124 return 0;
125 if (smt_enabled_at_boot
126 && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
127 return 0;
128 }
129
130 return 1;
131}
132
133
134#ifdef CONFIG_PPC64
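/*
 * Release a secondary that is spinning in the early hold loop by setting
 * cpu_start in its paca; if cpu_start is already set, the CPU may have been
 * soft-unplugged, so mark it up again and send a reschedule IPI to wake it.
 */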
135int smp_generic_kick_cpu(int nr)
136{
137 if (nr < 0 || nr >= nr_cpu_ids)
138 return -EINVAL;
139
140 /*
141 * The processor is currently spinning, waiting for the
142	 * cpu_start field to become non-zero. After we set cpu_start,
143	 * the processor will continue on to secondary_start.
144 */
145 if (!paca_ptrs[nr]->cpu_start) {
146 paca_ptrs[nr]->cpu_start = 1;
147 smp_mb();
148 return 0;
149 }
150
151#ifdef CONFIG_HOTPLUG_CPU
152 /*
153	 * OK, it's not there, so it might be soft-unplugged; let's
154	 * try to bring it back.
155 */
156 generic_set_cpu_up(nr);
157 smp_wmb();
158 smp_send_reschedule(nr);
159#endif /* CONFIG_HOTPLUG_CPU */
160
161 return 0;
162}
163#endif /* CONFIG_PPC64 */
164
165static irqreturn_t call_function_action(int irq, void *data)
166{
167 generic_smp_call_function_interrupt();
168 return IRQ_HANDLED;
169}
170
171static irqreturn_t reschedule_action(int irq, void *data)
172{
173 scheduler_ipi();
174 return IRQ_HANDLED;
175}
176
177#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
178static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
179{
180 timer_broadcast_interrupt();
181 return IRQ_HANDLED;
182}
183#endif
184
185#ifdef CONFIG_NMI_IPI
186static irqreturn_t nmi_ipi_action(int irq, void *data)
187{
188 smp_handle_nmi_ipi(get_irq_regs());
189 return IRQ_HANDLED;
190}
191#endif
192
193static irq_handler_t smp_ipi_action[] = {
194 [PPC_MSG_CALL_FUNCTION] = call_function_action,
195 [PPC_MSG_RESCHEDULE] = reschedule_action,
196#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
197 [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
198#endif
199#ifdef CONFIG_NMI_IPI
200 [PPC_MSG_NMI_IPI] = nmi_ipi_action,
201#endif
202};
203
204/*
205 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
206 * than going through the call function infrastructure, and strongly
207 * serialized, so it is more appropriate for debugging.
208 */
209const char *smp_ipi_name[] = {
210 [PPC_MSG_CALL_FUNCTION] = "ipi call function",
211 [PPC_MSG_RESCHEDULE] = "ipi reschedule",
212#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
213 [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
214#endif
215#ifdef CONFIG_NMI_IPI
216 [PPC_MSG_NMI_IPI] = "nmi ipi",
217#endif
218};
219
220/* optional function to request ipi, for controllers with >= 4 ipis */
221int smp_request_message_ipi(int virq, int msg)
222{
223 int err;
224
225 if (msg < 0 || msg > PPC_MSG_NMI_IPI)
226 return -EINVAL;
227#ifndef CONFIG_NMI_IPI
228 if (msg == PPC_MSG_NMI_IPI)
229 return 1;
230#endif
231
232 err = request_irq(virq, smp_ipi_action[msg],
233 IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
234 smp_ipi_name[msg], NULL);
235 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
236 virq, smp_ipi_name[msg], err);
237
238 return err;
239}
240
241#ifdef CONFIG_PPC_SMP_MUXED_IPI
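/*
 * Muxed IPIs: platforms with a single hardware IPI multiplex the logical
 * PPC_MSG_* messages through a per-cpu flag word. A sender sets a byte in
 * ipi_message and raises one hardware IPI; the receiver demultiplexes all
 * pending messages in smp_ipi_demux().
 */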
242struct cpu_messages {
243 long messages; /* current messages */
244};
245static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
246
247void smp_muxed_ipi_set_message(int cpu, int msg)
248{
249 struct cpu_messages *info = &per_cpu(ipi_message, cpu);
250 char *message = (char *)&info->messages;
251
252 /*
253 * Order previous accesses before accesses in the IPI handler.
254 */
255 smp_mb();
256 message[msg] = 1;
257}
258
259void smp_muxed_ipi_message_pass(int cpu, int msg)
260{
261 smp_muxed_ipi_set_message(cpu, msg);
262
263 /*
264 * cause_ipi functions are required to include a full barrier
265 * before doing whatever causes the IPI.
266 */
267 smp_ops->cause_ipi(cpu);
268}
269
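/*
 * IPI_MESSAGE(msg) selects the bit corresponding to the byte that
 * smp_muxed_ipi_set_message() writes for message number 'msg', taking
 * endianness into account so the per-byte stores and the word-wide xchg()
 * in the demux path agree on which message was set.
 */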
270#ifdef __BIG_ENDIAN__
271#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
272#else
273#define IPI_MESSAGE(A) (1uL << (8 * (A)))
274#endif
275
276irqreturn_t smp_ipi_demux(void)
277{
278 mb(); /* order any irq clear */
279
280 return smp_ipi_demux_relaxed();
281}
282
283/* sync-free variant. Callers should ensure synchronization */
284irqreturn_t smp_ipi_demux_relaxed(void)
285{
286 struct cpu_messages *info;
287 unsigned long all;
288
289 info = this_cpu_ptr(&ipi_message);
290 do {
291 all = xchg(&info->messages, 0);
292#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
293 /*
294 * Must check for PPC_MSG_RM_HOST_ACTION messages
295 * before PPC_MSG_CALL_FUNCTION messages because when
296 * a VM is destroyed, we call kick_all_cpus_sync()
297 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
298 * messages have completed before we free any VCPUs.
299 */
300 if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
301 kvmppc_xics_ipi_action();
302#endif
303 if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
304 generic_smp_call_function_interrupt();
305 if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
306 scheduler_ipi();
307#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
308 if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
309 timer_broadcast_interrupt();
310#endif
311#ifdef CONFIG_NMI_IPI
312 if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
313 nmi_ipi_action(0, NULL);
314#endif
315 } while (info->messages);
316
317 return IRQ_HANDLED;
318}
319#endif /* CONFIG_PPC_SMP_MUXED_IPI */
320
321static inline void do_message_pass(int cpu, int msg)
322{
323 if (smp_ops->message_pass)
324 smp_ops->message_pass(cpu, msg);
325#ifdef CONFIG_PPC_SMP_MUXED_IPI
326 else
327 smp_muxed_ipi_message_pass(cpu, msg);
328#endif
329}
330
331void smp_send_reschedule(int cpu)
332{
333 if (likely(smp_ops))
334 do_message_pass(cpu, PPC_MSG_RESCHEDULE);
335}
336EXPORT_SYMBOL_GPL(smp_send_reschedule);
337
338void arch_send_call_function_single_ipi(int cpu)
339{
340 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
341}
342
343void arch_send_call_function_ipi_mask(const struct cpumask *mask)
344{
345 unsigned int cpu;
346
347 for_each_cpu(cpu, mask)
348 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
349}
350
351#ifdef CONFIG_NMI_IPI
352
353/*
354 * "NMI IPI" system.
355 *
356 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
357 * a running system. They can be used for crash, debug, halt/reboot, etc.
358 *
359 * The IPI call waits with interrupts disabled until all targets enter the
360 * NMI handler, then returns. Subsequent IPIs can be issued before targets
361 * have returned from their handlers, so there is no guarantee about
362 * concurrency or re-entrancy.
363 *
364 * A new NMI can be issued before all targets exit the handler.
365 *
366 * The IPI call may time out without all targets entering the NMI handler.
367 * In that case, there is some logic to recover (and ignore subsequent
368 * NMI interrupts that may eventually be raised), but the platform interrupt
369 * handler may not be able to distinguish this from other exception causes,
370 * which may cause a crash.
371 */
372
373static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
374static struct cpumask nmi_ipi_pending_mask;
375static bool nmi_ipi_busy = false;
376static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
377
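/*
 * The NMI IPI state is protected by a hand-rolled atomic spinlock
 * (__nmi_ipi_lock) rather than a regular spinlock so that it can be taken
 * with interrupts hard-disabled and used from the NMI handler itself.
 */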
378static void nmi_ipi_lock_start(unsigned long *flags)
379{
380 raw_local_irq_save(*flags);
381 hard_irq_disable();
382 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
383 raw_local_irq_restore(*flags);
384 spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
385 raw_local_irq_save(*flags);
386 hard_irq_disable();
387 }
388}
389
390static void nmi_ipi_lock(void)
391{
392 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
393 spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
394}
395
396static void nmi_ipi_unlock(void)
397{
398 smp_mb();
399 WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
400 atomic_set(&__nmi_ipi_lock, 0);
401}
402
403static void nmi_ipi_unlock_end(unsigned long *flags)
404{
405 nmi_ipi_unlock();
406 raw_local_irq_restore(*flags);
407}
408
409/*
410 * Platform NMI handler calls this to ack an NMI IPI
411 */
412int smp_handle_nmi_ipi(struct pt_regs *regs)
413{
414 void (*fn)(struct pt_regs *) = NULL;
415 unsigned long flags;
416 int me = raw_smp_processor_id();
417 int ret = 0;
418
419 /*
420 * Unexpected NMIs are possible here because the interrupt may not
421 * be able to distinguish NMI IPIs from other types of NMIs, or
422 * because the caller may have timed out.
423 */
424 nmi_ipi_lock_start(&flags);
425 if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
426 cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
427 fn = READ_ONCE(nmi_ipi_function);
428 WARN_ON_ONCE(!fn);
429 ret = 1;
430 }
431 nmi_ipi_unlock_end(&flags);
432
433 if (fn)
434 fn(regs);
435
436 return ret;
437}
438
439static void do_smp_send_nmi_ipi(int cpu, bool safe)
440{
441 if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
442 return;
443
444 if (cpu >= 0) {
445 do_message_pass(cpu, PPC_MSG_NMI_IPI);
446 } else {
447 int c;
448
449 for_each_online_cpu(c) {
450 if (c == raw_smp_processor_id())
451 continue;
452 do_message_pass(c, PPC_MSG_NMI_IPI);
453 }
454 }
455}
456
457/*
458 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
459 * - fn is the target callback function.
460 * - delay_us > 0 is the delay before giving up waiting for targets to
461 * begin executing the handler, == 0 specifies indefinite delay.
462 */
463static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
464 u64 delay_us, bool safe)
465{
466 unsigned long flags;
467 int me = raw_smp_processor_id();
468 int ret = 1;
469
470 BUG_ON(cpu == me);
471 BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
472
473 if (unlikely(!smp_ops))
474 return 0;
475
476 nmi_ipi_lock_start(&flags);
477 while (nmi_ipi_busy) {
478 nmi_ipi_unlock_end(&flags);
479 spin_until_cond(!nmi_ipi_busy);
480 nmi_ipi_lock_start(&flags);
481 }
482 nmi_ipi_busy = true;
483 nmi_ipi_function = fn;
484
485 WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
486
487 if (cpu < 0) {
488 /* ALL_OTHERS */
489 cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
490 cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
491 } else {
492 cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
493 }
494
495 nmi_ipi_unlock();
496
497 /* Interrupts remain hard disabled */
498
499 do_smp_send_nmi_ipi(cpu, safe);
500
501 nmi_ipi_lock();
502 /* nmi_ipi_busy is set here, so unlock/lock is okay */
503 while (!cpumask_empty(&nmi_ipi_pending_mask)) {
504 nmi_ipi_unlock();
505 udelay(1);
506 nmi_ipi_lock();
507 if (delay_us) {
508 delay_us--;
509 if (!delay_us)
510 break;
511 }
512 }
513
514 if (!cpumask_empty(&nmi_ipi_pending_mask)) {
515 /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
516 ret = 0;
517 cpumask_clear(&nmi_ipi_pending_mask);
518 }
519
520 nmi_ipi_function = NULL;
521 nmi_ipi_busy = false;
522
523 nmi_ipi_unlock_end(&flags);
524
525 return ret;
526}
527
528int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
529{
530 return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
531}
532
533int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
534{
535 return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
536}
537#endif /* CONFIG_NMI_IPI */
538
539#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
540void tick_broadcast(const struct cpumask *mask)
541{
542 unsigned int cpu;
543
544 for_each_cpu(cpu, mask)
545 do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
546}
547#endif
548
549#ifdef CONFIG_DEBUGGER
550void debugger_ipi_callback(struct pt_regs *regs)
551{
552 debugger_ipi(regs);
553}
554
555void smp_send_debugger_break(void)
556{
557 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
558}
559#endif
560
561#ifdef CONFIG_KEXEC_CORE
562void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
563{
564 int cpu;
565
566 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
567 if (kdump_in_progress() && crash_wake_offline) {
568 for_each_present_cpu(cpu) {
569 if (cpu_online(cpu))
570 continue;
571 /*
572 * crash_ipi_callback will wait for
573 * all cpus, including offline CPUs.
574 * We don't care about nmi_ipi_function.
575 * Offline cpus will jump straight into
576 * crash_ipi_callback, we can skip the
577 * entire NMI dance and waiting for
578 * cpus to clear pending mask, etc.
579 */
580 do_smp_send_nmi_ipi(cpu, false);
581 }
582 }
583}
584#endif
585
586#ifdef CONFIG_NMI_IPI
587static void nmi_stop_this_cpu(struct pt_regs *regs)
588{
589 /*
590	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
591 */
592 spin_begin();
593 while (1)
594 spin_cpu_relax();
595}
596
597void smp_send_stop(void)
598{
599 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
600}
601
602#else /* CONFIG_NMI_IPI */
603
604static void stop_this_cpu(void *dummy)
605{
606 hard_irq_disable();
607 spin_begin();
608 while (1)
609 spin_cpu_relax();
610}
611
612void smp_send_stop(void)
613{
614 static bool stopped = false;
615
616 /*
617 * Prevent waiting on csd lock from a previous smp_send_stop.
618 * This is racy, but in general callers try to do the right
619 * thing and only fire off one smp_send_stop (e.g., see
620 * kernel/panic.c)
621 */
622 if (stopped)
623 return;
624
625 stopped = true;
626
627 smp_call_function(stop_this_cpu, NULL, 0);
628}
629#endif /* CONFIG_NMI_IPI */
630
631struct task_struct *current_set[NR_CPUS];
632
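/*
 * Record this CPU's PVR in per-cpu data (and the initial next_tlbcam_idx
 * derived from TLB1CFG on FSL Book3E).
 */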
633static void smp_store_cpu_info(int id)
634{
635 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
636#ifdef CONFIG_PPC_FSL_BOOK3E
637 per_cpu(next_tlbcam_idx, id)
638 = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
639#endif
640}
641
642/*
643 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
644 * rather than just passing around the cpumask we pass around a function that
645 * returns that cpumask for the given CPU.
646 */
647static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
648{
649 cpumask_set_cpu(i, get_cpumask(j));
650 cpumask_set_cpu(j, get_cpumask(i));
651}
652
653#ifdef CONFIG_HOTPLUG_CPU
654static void set_cpus_unrelated(int i, int j,
655 struct cpumask *(*get_cpumask)(int))
656{
657 cpumask_clear_cpu(i, get_cpumask(j));
658 cpumask_clear_cpu(j, get_cpumask(i));
659}
660#endif
661
662/*
663 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
664 * property for the CPU device node @dn and stores
665 * the parsed output in the thread_groups
666 * structure @tg if the ibm,thread-groups[0]
667 * matches @property.
668 *
669 * @dn: The device node of the CPU device.
670 * @tg: Pointer to a thread group structure into which the parsed
671 * output of "ibm,thread-groups" is stored.
672 * @property: The property of the thread-group that the caller is
673 * interested in.
674 *
675 * ibm,thread-groups[0..N-1] array defines which group of threads in
676 * the CPU-device node can be grouped together based on the property.
677 *
678 * ibm,thread-groups[0] tells us the property based on which the
679 * threads are being grouped together. If this value is 1, it implies
680 * that the threads in the same group share the L1 and translation caches.
681 *
682 * ibm,thread-groups[1] tells us how many such thread groups exist.
683 *
684 * ibm,thread-groups[2] tells us the number of threads in each such
685 * group.
686 *
687 * ibm,thread-groups[3..N-1] is the list of threads identified by
688 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
689 * the grouping.
690 *
691 * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
692 * implies that there are 2 groups of 4 threads each, where each group
693 * of threads shares the L1 and translation caches.
694 *
695 * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
696 * and the "ibm,ppc-interrupt-server#s" of the second group is {9, 10,
697 * 11, 12}.
698 *
699 * Returns 0 on success, -EINVAL if the property does not exist,
700 * -ENODATA if property does not have a value, and -EOVERFLOW if the
701 * property data isn't large enough.
702 */
703static int parse_thread_groups(struct device_node *dn,
704 struct thread_groups *tg,
705 unsigned int property)
706{
707 int i;
708 u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
709 u32 *thread_list;
710 size_t total_threads;
711 int ret;
712
713 ret = of_property_read_u32_array(dn, "ibm,thread-groups",
714 thread_group_array, 3);
715 if (ret)
716 return ret;
717
718 tg->property = thread_group_array[0];
719 tg->nr_groups = thread_group_array[1];
720 tg->threads_per_group = thread_group_array[2];
721 if (tg->property != property ||
722 tg->nr_groups < 1 ||
723 tg->threads_per_group < 1)
724 return -ENODATA;
725
726 total_threads = tg->nr_groups * tg->threads_per_group;
727
728 ret = of_property_read_u32_array(dn, "ibm,thread-groups",
729 thread_group_array,
730 3 + total_threads);
731 if (ret)
732 return ret;
733
734 thread_list = &thread_group_array[3];
735
736 for (i = 0 ; i < total_threads; i++)
737 tg->thread_list[i] = thread_list[i];
738
739 return 0;
740}
741
742/*
743 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
744 * that @cpu belongs to.
745 *
746 * @cpu : The logical CPU whose thread group is being searched.
747 * @tg : The thread-group structure of the CPU node which @cpu belongs
748 * to.
749 *
750 * Returns the index to tg->thread_list that points to the start
751 * of the thread_group that @cpu belongs to.
752 *
753 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
754 * tg->thread_list.
755 */
756static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
757{
758 int hw_cpu_id = get_hard_smp_processor_id(cpu);
759 int i, j;
760
761 for (i = 0; i < tg->nr_groups; i++) {
762 int group_start = i * tg->threads_per_group;
763
764 for (j = 0; j < tg->threads_per_group; j++) {
765 int idx = group_start + j;
766
767 if (tg->thread_list[idx] == hw_cpu_id)
768 return group_start;
769 }
770 }
771
772 return -1;
773}
774
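/*
 * Build the per-cpu L1 cache sibling map for @cpu from the
 * "ibm,thread-groups" property: every thread in @cpu's core that falls in
 * the same THREAD_GROUP_SHARE_L1 group as @cpu is added to the mask.
 */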
775static int init_cpu_l1_cache_map(int cpu)
776
777{
778 struct device_node *dn = of_get_cpu_node(cpu, NULL);
779 struct thread_groups tg = {.property = 0,
780 .nr_groups = 0,
781 .threads_per_group = 0};
782 int first_thread = cpu_first_thread_sibling(cpu);
783 int i, cpu_group_start = -1, err = 0;
784
785 if (!dn)
786 return -ENODATA;
787
788 err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
789 if (err)
790 goto out;
791
792 zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
793 GFP_KERNEL,
794 cpu_to_node(cpu));
795
796 cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
797
798 if (unlikely(cpu_group_start == -1)) {
799 WARN_ON_ONCE(1);
800 err = -ENODATA;
801 goto out;
802 }
803
804 for (i = first_thread; i < first_thread + threads_per_core; i++) {
805 int i_group_start = get_cpu_thread_group_start(i, &tg);
806
807 if (unlikely(i_group_start == -1)) {
808 WARN_ON_ONCE(1);
809 err = -ENODATA;
810 goto out;
811 }
812
813 if (i_group_start == cpu_group_start)
814 cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
815 }
816
817out:
818 of_node_put(dn);
819 return err;
820}
821
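/*
 * Detect "big core" topology: if every possible CPU provides L1 thread-group
 * information, the core consists of smaller groups sharing an L1, so
 * allocate the small-core sibling masks and set has_big_cores.
 */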
822static int init_big_cores(void)
823{
824 int cpu;
825
826 for_each_possible_cpu(cpu) {
827 int err = init_cpu_l1_cache_map(cpu);
828
829 if (err)
830 return err;
831
832 zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
833 GFP_KERNEL,
834 cpu_to_node(cpu));
835 }
836
837 has_big_cores = true;
838 return 0;
839}
840
841void __init smp_prepare_cpus(unsigned int max_cpus)
842{
843 unsigned int cpu;
844
845 DBG("smp_prepare_cpus\n");
846
847 /*
848	 * setup_cpu may need to be called on the boot cpu. We haven't
849	 * spun any cpus up yet, but let's be paranoid.
850 */
851 BUG_ON(boot_cpuid != smp_processor_id());
852
853 /* Fixup boot cpu */
854 smp_store_cpu_info(boot_cpuid);
855 cpu_callin_map[boot_cpuid] = 1;
856
857 for_each_possible_cpu(cpu) {
858 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
859 GFP_KERNEL, cpu_to_node(cpu));
860 zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
861 GFP_KERNEL, cpu_to_node(cpu));
862 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
863 GFP_KERNEL, cpu_to_node(cpu));
864 /*
865 * numa_node_id() works after this.
866 */
867 if (cpu_present(cpu)) {
868 set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
869 set_cpu_numa_mem(cpu,
870 local_memory_node(numa_cpu_lookup_table[cpu]));
871 }
872 }
873
874 /* Init the cpumasks so the boot CPU is related to itself */
875 cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
876 cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
877 cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
878
879 init_big_cores();
880 if (has_big_cores) {
881 cpumask_set_cpu(boot_cpuid,
882 cpu_smallcore_mask(boot_cpuid));
883 }
884
885 if (smp_ops && smp_ops->probe)
886 smp_ops->probe();
887}
888
889void smp_prepare_boot_cpu(void)
890{
891 BUG_ON(smp_processor_id() != boot_cpuid);
892#ifdef CONFIG_PPC64
893 paca_ptrs[boot_cpuid]->__current = current;
894#endif
895 set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
896 current_set[boot_cpuid] = current;
897}
898
899#ifdef CONFIG_HOTPLUG_CPU
900
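/*
 * Take the calling CPU out of the online mask and migrate its IRQs away;
 * runs on the CPU that is going offline. The boot CPU cannot be removed.
 */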
901int generic_cpu_disable(void)
902{
903 unsigned int cpu = smp_processor_id();
904
905 if (cpu == boot_cpuid)
906 return -EBUSY;
907
908 set_cpu_online(cpu, false);
909#ifdef CONFIG_PPC64
910 vdso_data->processorCount--;
911#endif
912 /* Update affinity of all IRQs previously aimed at this CPU */
913 irq_migrate_all_off_this_cpu();
914
915 /*
916 * Depending on the details of the interrupt controller, it's possible
917 * that one of the interrupts we just migrated away from this CPU is
918 * actually already pending on this CPU. If we leave it in that state
919 * the interrupt will never be EOI'ed, and will never fire again. So
920 * temporarily enable interrupts here, to allow any pending interrupt to
921 * be received (and EOI'ed), before we take this CPU offline.
922 */
923 local_irq_enable();
924 mdelay(1);
925 local_irq_disable();
926
927 return 0;
928}
929
930void generic_cpu_die(unsigned int cpu)
931{
932 int i;
933
934 for (i = 0; i < 100; i++) {
935 smp_rmb();
936 if (is_cpu_dead(cpu))
937 return;
938 msleep(100);
939 }
940 printk(KERN_ERR "CPU%d didn't die...\n", cpu);
941}
942
943void generic_set_cpu_dead(unsigned int cpu)
944{
945 per_cpu(cpu_state, cpu) = CPU_DEAD;
946}
947
948/*
949 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
950 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
951 * which makes generic_cpu_die() return immediately instead of waiting.
952 */
953void generic_set_cpu_up(unsigned int cpu)
954{
955 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
956}
957
958int generic_check_cpu_restart(unsigned int cpu)
959{
960 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
961}
962
963int is_cpu_dead(unsigned int cpu)
964{
965 return per_cpu(cpu_state, cpu) == CPU_DEAD;
966}
967
968static bool secondaries_inhibited(void)
969{
970 return kvm_hv_mode_active();
971}
972
973#else /* HOTPLUG_CPU */
974
975#define secondaries_inhibited() 0
976
977#endif
978
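/*
 * Hook the idle task up as the initial thread of the CPU being brought up:
 * point the paca (on ppc64), secondary_current and current_set at it.
 */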
979static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
980{
981#ifdef CONFIG_PPC64
982 paca_ptrs[cpu]->__current = idle;
983 paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
984 THREAD_SIZE - STACK_FRAME_OVERHEAD;
985#endif
986 idle->cpu = cpu;
987 secondary_current = current_set[cpu] = idle;
988}
989
990int __cpu_up(unsigned int cpu, struct task_struct *tidle)
991{
992 int rc, c;
993
994 /*
995 * Don't allow secondary threads to come online if inhibited
996 */
997 if (threads_per_core > 1 && secondaries_inhibited() &&
998 cpu_thread_in_subcore(cpu))
999 return -EBUSY;
1000
1001 if (smp_ops == NULL ||
1002 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1003 return -EINVAL;
1004
1005 cpu_idle_thread_init(cpu, tidle);
1006
1007 /*
1008 * The platform might need to allocate resources prior to bringing
1009 * up the CPU
1010 */
1011 if (smp_ops->prepare_cpu) {
1012 rc = smp_ops->prepare_cpu(cpu);
1013 if (rc)
1014 return rc;
1015 }
1016
1017	/* Make sure the callin-map entry is 0 (it can be a leftover from a
1018	 * previous CPU hotplug).
1019 */
1020 cpu_callin_map[cpu] = 0;
1021
1022 /* The information for processor bringup must
1023 * be written out to main store before we release
1024 * the processor.
1025 */
1026 smp_mb();
1027
1028 /* wake up cpus */
1029 DBG("smp: kicking cpu %d\n", cpu);
1030 rc = smp_ops->kick_cpu(cpu);
1031 if (rc) {
1032 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1033 return rc;
1034 }
1035
1036 /*
1037	 * Wait to see if the cpu made a callin (is actually up).
1038	 * Use this value that I found through experimentation.
1039 * -- Cort
1040 */
1041 if (system_state < SYSTEM_RUNNING)
1042 for (c = 50000; c && !cpu_callin_map[cpu]; c--)
1043 udelay(100);
1044#ifdef CONFIG_HOTPLUG_CPU
1045 else
1046 /*
1047 * CPUs can take much longer to come up in the
1048 * hotplug case. Wait five seconds.
1049 */
1050 for (c = 5000; c && !cpu_callin_map[cpu]; c--)
1051 msleep(1);
1052#endif
1053
1054 if (!cpu_callin_map[cpu]) {
1055 printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1056 return -ENOENT;
1057 }
1058
1059 DBG("Processor %u found.\n", cpu);
1060
1061 if (smp_ops->give_timebase)
1062 smp_ops->give_timebase();
1063
1064 /* Wait until cpu puts itself in the online & active maps */
1065 spin_until_cond(cpu_online(cpu));
1066
1067 return 0;
1068}
1069
1070/* Return the value of the reg property corresponding to the given
1071 * logical cpu.
1072 */
1073int cpu_to_core_id(int cpu)
1074{
1075 struct device_node *np;
1076 const __be32 *reg;
1077 int id = -1;
1078
1079 np = of_get_cpu_node(cpu, NULL);
1080 if (!np)
1081 goto out;
1082
1083 reg = of_get_property(np, "reg", NULL);
1084 if (!reg)
1085 goto out;
1086
1087 id = be32_to_cpup(reg);
1088out:
1089 of_node_put(np);
1090 return id;
1091}
1092EXPORT_SYMBOL_GPL(cpu_to_core_id);
1093
1094/* Helper routines for cpu to core mapping */
1095int cpu_core_index_of_thread(int cpu)
1096{
1097 return cpu >> threads_shift;
1098}
1099EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1100
1101int cpu_first_thread_of_core(int core)
1102{
1103 return core << threads_shift;
1104}
1105EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
1106
1107/* Must be called when no change can occur to cpu_present_mask,
1108 * i.e. during cpu online or offline.
1109 */
1110static struct device_node *cpu_to_l2cache(int cpu)
1111{
1112 struct device_node *np;
1113 struct device_node *cache;
1114
1115 if (!cpu_present(cpu))
1116 return NULL;
1117
1118 np = of_get_cpu_node(cpu, NULL);
1119 if (np == NULL)
1120 return NULL;
1121
1122 cache = of_find_next_cache_node(np);
1123
1124 of_node_put(np);
1125
1126 return cache;
1127}
1128
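/*
 * Mark @cpu related (via @mask_fn) to every online CPU whose next-level
 * cache device tree node matches @cpu's, i.e. CPUs that share an L2.
 */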
1129static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
1130{
1131 struct device_node *l2_cache, *np;
1132 int i;
1133
1134 l2_cache = cpu_to_l2cache(cpu);
1135 if (!l2_cache)
1136 return false;
1137
1138 for_each_cpu(i, cpu_online_mask) {
1139 /*
1140		 * When updating the masks, the current CPU has not yet been
1141		 * marked online, but we still need to update the cache masks.
1142 */
1143 np = cpu_to_l2cache(i);
1144 if (!np)
1145 continue;
1146
1147 if (np == l2_cache)
1148 set_cpus_related(cpu, i, mask_fn);
1149
1150 of_node_put(np);
1151 }
1152 of_node_put(l2_cache);
1153
1154 return true;
1155}
1156
1157#ifdef CONFIG_HOTPLUG_CPU
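/* Undo add_cpu_to_masks() when a CPU goes offline. */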
1158static void remove_cpu_from_masks(int cpu)
1159{
1160 int i;
1161
1162 /* NB: cpu_core_mask is a superset of the others */
1163 for_each_cpu(i, cpu_core_mask(cpu)) {
1164 set_cpus_unrelated(cpu, i, cpu_core_mask);
1165 set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1166 set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1167 if (has_big_cores)
1168 set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1169 }
1170}
1171#endif
1172
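/*
 * Add @cpu to the small-core (shared-L1) sibling mask of each online thread
 * in its core that shares an L1 with it.
 */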
1173static inline void add_cpu_to_smallcore_masks(int cpu)
1174{
1175 struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
1176 int i, first_thread = cpu_first_thread_sibling(cpu);
1177
1178 if (!has_big_cores)
1179 return;
1180
1181 cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1182
1183 for (i = first_thread; i < first_thread + threads_per_core; i++) {
1184 if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
1185 set_cpus_related(i, cpu, cpu_smallcore_mask);
1186 }
1187}
1188
1189int get_physical_package_id(int cpu)
1190{
1191 int pkg_id = cpu_to_chip_id(cpu);
1192
1193 /*
1194	 * If the platform is PowerNV or a KVM guest, ibm,chip-id is
1195	 * defined, so return the chip-id as the result of
1196	 * get_physical_package_id().
1197 */
1198 if (pkg_id == -1 && firmware_has_feature(FW_FEATURE_LPAR) &&
1199 IS_ENABLED(CONFIG_PPC_SPLPAR)) {
1200 struct device_node *np = of_get_cpu_node(cpu, NULL);
1201 pkg_id = of_node_to_nid(np);
1202 of_node_put(np);
1203 }
1204
1205 return pkg_id;
1206}
1207EXPORT_SYMBOL_GPL(get_physical_package_id);
1208
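/*
 * Populate the topology masks for a CPU coming online: thread siblings
 * first, then L2-cache siblings, then all CPUs in the same package.
 */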
1209static void add_cpu_to_masks(int cpu)
1210{
1211 int first_thread = cpu_first_thread_sibling(cpu);
1212 int pkg_id = get_physical_package_id(cpu);
1213 int i;
1214
1215 /*
1216 * This CPU will not be in the online mask yet so we need to manually
1217	 * add it to its own thread sibling mask.
1218 */
1219 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1220
1221 for (i = first_thread; i < first_thread + threads_per_core; i++)
1222 if (cpu_online(i))
1223 set_cpus_related(i, cpu, cpu_sibling_mask);
1224
1225 add_cpu_to_smallcore_masks(cpu);
1226 /*
1227 * Copy the thread sibling mask into the cache sibling mask
1228 * and mark any CPUs that share an L2 with this CPU.
1229 */
1230 for_each_cpu(i, cpu_sibling_mask(cpu))
1231 set_cpus_related(cpu, i, cpu_l2_cache_mask);
1232 update_mask_by_l2(cpu, cpu_l2_cache_mask);
1233
1234 /*
1235 * Copy the cache sibling mask into core sibling mask and mark
1236 * any CPUs on the same chip as this CPU.
1237 */
1238 for_each_cpu(i, cpu_l2_cache_mask(cpu))
1239 set_cpus_related(cpu, i, cpu_core_mask);
1240
1241 if (pkg_id == -1)
1242 return;
1243
1244 for_each_cpu(i, cpu_online_mask)
1245 if (get_physical_package_id(i) == pkg_id)
1246 set_cpus_related(cpu, i, cpu_core_mask);
1247}
1248
1249static bool shared_caches;
1250
1251/* Activate a secondary processor. */
1252void start_secondary(void *unused)
1253{
1254 unsigned int cpu = smp_processor_id();
1255 struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1256
1257 mmgrab(&init_mm);
1258 current->active_mm = &init_mm;
1259
1260 smp_store_cpu_info(cpu);
1261 set_dec(tb_ticks_per_jiffy);
1262 preempt_disable();
1263 cpu_callin_map[cpu] = 1;
1264
1265 if (smp_ops->setup_cpu)
1266 smp_ops->setup_cpu(cpu);
1267 if (smp_ops->take_timebase)
1268 smp_ops->take_timebase();
1269
1270 secondary_cpu_time_init();
1271
1272#ifdef CONFIG_PPC64
1273 if (system_state == SYSTEM_RUNNING)
1274 vdso_data->processorCount++;
1275
1276 vdso_getcpu_init();
1277#endif
1278 /* Update topology CPU masks */
1279 add_cpu_to_masks(cpu);
1280
1281 if (has_big_cores)
1282 sibling_mask = cpu_smallcore_mask;
1283 /*
1284 * Check for any shared caches. Note that this must be done on a
1285 * per-core basis because one core in the pair might be disabled.
1286 */
1287 if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
1288 shared_caches = true;
1289
1290 set_numa_node(numa_cpu_lookup_table[cpu]);
1291 set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1292
1293 smp_wmb();
1294 notify_cpu_starting(cpu);
1295 set_cpu_online(cpu, true);
1296
1297 boot_init_stack_canary();
1298
1299 local_irq_enable();
1300
1301 /* We can enable ftrace for secondary cpus now */
1302 this_cpu_enable_ftrace();
1303
1304 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1305
1306 BUG();
1307}
1308
1309int setup_profiling_timer(unsigned int multiplier)
1310{
1311 return 0;
1312}
1313
1314#ifdef CONFIG_SCHED_SMT
1315/* cpumask of CPUs with asymmetric SMT dependency */
1316static int powerpc_smt_flags(void)
1317{
1318 int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1319
1320 if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
1321 printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
1322 flags |= SD_ASYM_PACKING;
1323 }
1324 return flags;
1325}
1326#endif
1327
1328static struct sched_domain_topology_level powerpc_topology[] = {
1329#ifdef CONFIG_SCHED_SMT
1330 { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1331#endif
1332 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
1333 { NULL, },
1334};
1335
1336/*
1337 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1338 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1339 * since the migrated task remains cache hot. We want to take advantage of this
1340 * at the scheduler level so an extra topology level is required.
1341 */
1342static int powerpc_shared_cache_flags(void)
1343{
1344 return SD_SHARE_PKG_RESOURCES;
1345}
1346
1347/*
1348 * We can't just pass cpu_l2_cache_mask() directly because it
1349 * returns a non-const pointer and the compiler barfs on that.
1350 */
1351static const struct cpumask *shared_cache_mask(int cpu)
1352{
1353 return cpu_l2_cache_mask(cpu);
1354}
1355
1356#ifdef CONFIG_SCHED_SMT
1357static const struct cpumask *smallcore_smt_mask(int cpu)
1358{
1359 return cpu_smallcore_mask(cpu);
1360}
1361#endif
1362
1363static struct sched_domain_topology_level power9_topology[] = {
1364#ifdef CONFIG_SCHED_SMT
1365 { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1366#endif
1367 { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
1368 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
1369 { NULL, },
1370};
1371
1372void __init smp_cpus_done(unsigned int max_cpus)
1373{
1374 /*
1375 * We are running pinned to the boot CPU, see rest_init().
1376 */
1377 if (smp_ops && smp_ops->setup_cpu)
1378 smp_ops->setup_cpu(boot_cpuid);
1379
1380 if (smp_ops && smp_ops->bringup_done)
1381 smp_ops->bringup_done();
1382
1383 dump_numa_cpu_topology();
1384
1385#ifdef CONFIG_SCHED_SMT
1386 if (has_big_cores) {
1387 pr_info("Big cores detected but using small core scheduling\n");
1388 power9_topology[0].mask = smallcore_smt_mask;
1389 powerpc_topology[0].mask = smallcore_smt_mask;
1390 }
1391#endif
1392 /*
1393 * If any CPU detects that it's sharing a cache with another CPU then
1394 * use the deeper topology that is aware of this sharing.
1395 */
1396 if (shared_caches) {
1397 pr_info("Using shared cache scheduler topology\n");
1398 set_sched_topology(power9_topology);
1399 } else {
1400 pr_info("Using standard scheduler topology\n");
1401 set_sched_topology(powerpc_topology);
1402 }
1403}
1404
1405#ifdef CONFIG_HOTPLUG_CPU
1406int __cpu_disable(void)
1407{
1408 int cpu = smp_processor_id();
1409 int err;
1410
1411 if (!smp_ops->cpu_disable)
1412 return -ENOSYS;
1413
1414 this_cpu_disable_ftrace();
1415
1416 err = smp_ops->cpu_disable();
1417 if (err)
1418 return err;
1419
1420 /* Update sibling maps */
1421 remove_cpu_from_masks(cpu);
1422
1423 return 0;
1424}
1425
1426void __cpu_die(unsigned int cpu)
1427{
1428 if (smp_ops->cpu_die)
1429 smp_ops->cpu_die(cpu);
1430}
1431
1432void cpu_die(void)
1433{
1434 /*
1435	 * Disable ftrace on the down path. This will be re-enabled by
1436	 * start_secondary() via start_secondary_resume() below.
1437 */
1438 this_cpu_disable_ftrace();
1439
1440 if (ppc_md.cpu_die)
1441 ppc_md.cpu_die();
1442
1443 /* If we return, we re-enter start_secondary */
1444 start_secondary_resume();
1445}
1446
1447#endif
1/*
2 * SMP support for ppc.
3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
5 * deal of code from the sparc and intel versions.
6 *
7 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
8 *
9 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
10 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18#undef DEBUG
19
20#include <linux/kernel.h>
21#include <linux/export.h>
22#include <linux/sched/mm.h>
23#include <linux/sched/topology.h>
24#include <linux/smp.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28#include <linux/spinlock.h>
29#include <linux/cache.h>
30#include <linux/err.h>
31#include <linux/device.h>
32#include <linux/cpu.h>
33#include <linux/notifier.h>
34#include <linux/topology.h>
35#include <linux/profile.h>
36#include <linux/processor.h>
37
38#include <asm/ptrace.h>
39#include <linux/atomic.h>
40#include <asm/irq.h>
41#include <asm/hw_irq.h>
42#include <asm/kvm_ppc.h>
43#include <asm/dbell.h>
44#include <asm/page.h>
45#include <asm/pgtable.h>
46#include <asm/prom.h>
47#include <asm/smp.h>
48#include <asm/time.h>
49#include <asm/machdep.h>
50#include <asm/cputhreads.h>
51#include <asm/cputable.h>
52#include <asm/mpic.h>
53#include <asm/vdso_datapage.h>
54#ifdef CONFIG_PPC64
55#include <asm/paca.h>
56#endif
57#include <asm/vdso.h>
58#include <asm/debug.h>
59#include <asm/kexec.h>
60#include <asm/asm-prototypes.h>
61#include <asm/cpu_has_feature.h>
62
63#ifdef DEBUG
64#include <asm/udbg.h>
65#define DBG(fmt...) udbg_printf(fmt)
66#else
67#define DBG(fmt...)
68#endif
69
70#ifdef CONFIG_HOTPLUG_CPU
71/* State of each CPU during hotplug phases */
72static DEFINE_PER_CPU(int, cpu_state) = { 0 };
73#endif
74
75struct thread_info *secondary_ti;
76
77DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
78DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
79DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
80
81EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
82EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
83EXPORT_PER_CPU_SYMBOL(cpu_core_map);
84
85/* SMP operations for this machine */
86struct smp_ops_t *smp_ops;
87
88/* Can't be static due to PowerMac hackery */
89volatile unsigned int cpu_callin_map[NR_CPUS];
90
91int smt_enabled_at_boot = 1;
92
93/*
94 * Returns 1 if the specified cpu should be brought up during boot.
95 * Used to inhibit booting threads if they've been disabled or
96 * limited on the command line
97 */
98int smp_generic_cpu_bootable(unsigned int nr)
99{
100 /* Special case - we inhibit secondary thread startup
101 * during boot if the user requests it.
102 */
103 if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
104 if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
105 return 0;
106 if (smt_enabled_at_boot
107 && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
108 return 0;
109 }
110
111 return 1;
112}
113
114
115#ifdef CONFIG_PPC64
116int smp_generic_kick_cpu(int nr)
117{
118 if (nr < 0 || nr >= nr_cpu_ids)
119 return -EINVAL;
120
121 /*
122 * The processor is currently spinning, waiting for the
123 * cpu_start field to become non-zero After we set cpu_start,
124 * the processor will continue on to secondary_start
125 */
126 if (!paca_ptrs[nr]->cpu_start) {
127 paca_ptrs[nr]->cpu_start = 1;
128 smp_mb();
129 return 0;
130 }
131
132#ifdef CONFIG_HOTPLUG_CPU
133 /*
134 * Ok it's not there, so it might be soft-unplugged, let's
135 * try to bring it back
136 */
137 generic_set_cpu_up(nr);
138 smp_wmb();
139 smp_send_reschedule(nr);
140#endif /* CONFIG_HOTPLUG_CPU */
141
142 return 0;
143}
144#endif /* CONFIG_PPC64 */
145
146static irqreturn_t call_function_action(int irq, void *data)
147{
148 generic_smp_call_function_interrupt();
149 return IRQ_HANDLED;
150}
151
152static irqreturn_t reschedule_action(int irq, void *data)
153{
154 scheduler_ipi();
155 return IRQ_HANDLED;
156}
157
158static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
159{
160 tick_broadcast_ipi_handler();
161 return IRQ_HANDLED;
162}
163
164#ifdef CONFIG_NMI_IPI
165static irqreturn_t nmi_ipi_action(int irq, void *data)
166{
167 smp_handle_nmi_ipi(get_irq_regs());
168 return IRQ_HANDLED;
169}
170#endif
171
172static irq_handler_t smp_ipi_action[] = {
173 [PPC_MSG_CALL_FUNCTION] = call_function_action,
174 [PPC_MSG_RESCHEDULE] = reschedule_action,
175 [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
176#ifdef CONFIG_NMI_IPI
177 [PPC_MSG_NMI_IPI] = nmi_ipi_action,
178#endif
179};
180
181/*
182 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
183 * than going through the call function infrastructure, and strongly
184 * serialized, so it is more appropriate for debugging.
185 */
186const char *smp_ipi_name[] = {
187 [PPC_MSG_CALL_FUNCTION] = "ipi call function",
188 [PPC_MSG_RESCHEDULE] = "ipi reschedule",
189 [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
190 [PPC_MSG_NMI_IPI] = "nmi ipi",
191};
192
193/* optional function to request ipi, for controllers with >= 4 ipis */
194int smp_request_message_ipi(int virq, int msg)
195{
196 int err;
197
198 if (msg < 0 || msg > PPC_MSG_NMI_IPI)
199 return -EINVAL;
200#ifndef CONFIG_NMI_IPI
201 if (msg == PPC_MSG_NMI_IPI)
202 return 1;
203#endif
204
205 err = request_irq(virq, smp_ipi_action[msg],
206 IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
207 smp_ipi_name[msg], NULL);
208 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
209 virq, smp_ipi_name[msg], err);
210
211 return err;
212}
213
214#ifdef CONFIG_PPC_SMP_MUXED_IPI
215struct cpu_messages {
216 long messages; /* current messages */
217};
218static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
219
220void smp_muxed_ipi_set_message(int cpu, int msg)
221{
222 struct cpu_messages *info = &per_cpu(ipi_message, cpu);
223 char *message = (char *)&info->messages;
224
225 /*
226 * Order previous accesses before accesses in the IPI handler.
227 */
228 smp_mb();
229 message[msg] = 1;
230}
231
232void smp_muxed_ipi_message_pass(int cpu, int msg)
233{
234 smp_muxed_ipi_set_message(cpu, msg);
235
236 /*
237 * cause_ipi functions are required to include a full barrier
238 * before doing whatever causes the IPI.
239 */
240 smp_ops->cause_ipi(cpu);
241}
242
243#ifdef __BIG_ENDIAN__
244#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
245#else
246#define IPI_MESSAGE(A) (1uL << (8 * (A)))
247#endif
248
249irqreturn_t smp_ipi_demux(void)
250{
251 mb(); /* order any irq clear */
252
253 return smp_ipi_demux_relaxed();
254}
255
256/* sync-free variant. Callers should ensure synchronization */
257irqreturn_t smp_ipi_demux_relaxed(void)
258{
259 struct cpu_messages *info;
260 unsigned long all;
261
262 info = this_cpu_ptr(&ipi_message);
263 do {
264 all = xchg(&info->messages, 0);
265#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
266 /*
267 * Must check for PPC_MSG_RM_HOST_ACTION messages
268 * before PPC_MSG_CALL_FUNCTION messages because when
269 * a VM is destroyed, we call kick_all_cpus_sync()
270 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
271 * messages have completed before we free any VCPUs.
272 */
273 if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
274 kvmppc_xics_ipi_action();
275#endif
276 if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
277 generic_smp_call_function_interrupt();
278 if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
279 scheduler_ipi();
280 if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
281 tick_broadcast_ipi_handler();
282#ifdef CONFIG_NMI_IPI
283 if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
284 nmi_ipi_action(0, NULL);
285#endif
286 } while (info->messages);
287
288 return IRQ_HANDLED;
289}
290#endif /* CONFIG_PPC_SMP_MUXED_IPI */
291
292static inline void do_message_pass(int cpu, int msg)
293{
294 if (smp_ops->message_pass)
295 smp_ops->message_pass(cpu, msg);
296#ifdef CONFIG_PPC_SMP_MUXED_IPI
297 else
298 smp_muxed_ipi_message_pass(cpu, msg);
299#endif
300}
301
302void smp_send_reschedule(int cpu)
303{
304 if (likely(smp_ops))
305 do_message_pass(cpu, PPC_MSG_RESCHEDULE);
306}
307EXPORT_SYMBOL_GPL(smp_send_reschedule);
308
309void arch_send_call_function_single_ipi(int cpu)
310{
311 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
312}
313
314void arch_send_call_function_ipi_mask(const struct cpumask *mask)
315{
316 unsigned int cpu;
317
318 for_each_cpu(cpu, mask)
319 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
320}
321
322#ifdef CONFIG_NMI_IPI
323
324/*
325 * "NMI IPI" system.
326 *
327 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
328 * a running system. They can be used for crash, debug, halt/reboot, etc.
329 *
330 * NMI IPIs are globally single threaded. No more than one in progress at
331 * any time.
332 *
333 * The IPI call waits with interrupts disabled until all targets enter the
334 * NMI handler, then the call returns.
335 *
336 * No new NMI can be initiated until targets exit the handler.
337 *
338 * The IPI call may time out without all targets entering the NMI handler.
339 * In that case, there is some logic to recover (and ignore subsequent
340 * NMI interrupts that may eventually be raised), but the platform interrupt
341 * handler may not be able to distinguish this from other exception causes,
342 * which may cause a crash.
343 */
344
345static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
346static struct cpumask nmi_ipi_pending_mask;
347static int nmi_ipi_busy_count = 0;
348static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
349
350static void nmi_ipi_lock_start(unsigned long *flags)
351{
352 raw_local_irq_save(*flags);
353 hard_irq_disable();
354 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
355 raw_local_irq_restore(*flags);
356 spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
357 raw_local_irq_save(*flags);
358 hard_irq_disable();
359 }
360}
361
362static void nmi_ipi_lock(void)
363{
364 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
365 spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
366}
367
368static void nmi_ipi_unlock(void)
369{
370 smp_mb();
371 WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
372 atomic_set(&__nmi_ipi_lock, 0);
373}
374
375static void nmi_ipi_unlock_end(unsigned long *flags)
376{
377 nmi_ipi_unlock();
378 raw_local_irq_restore(*flags);
379}
380
381/*
382 * Platform NMI handler calls this to ack
383 */
384int smp_handle_nmi_ipi(struct pt_regs *regs)
385{
386 void (*fn)(struct pt_regs *);
387 unsigned long flags;
388 int me = raw_smp_processor_id();
389 int ret = 0;
390
391 /*
392 * Unexpected NMIs are possible here because the interrupt may not
393 * be able to distinguish NMI IPIs from other types of NMIs, or
394 * because the caller may have timed out.
395 */
396 nmi_ipi_lock_start(&flags);
397 if (!nmi_ipi_busy_count)
398 goto out;
399 if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
400 goto out;
401
402 fn = nmi_ipi_function;
403 if (!fn)
404 goto out;
405
406 cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
407 nmi_ipi_busy_count++;
408 nmi_ipi_unlock();
409
410 ret = 1;
411
412 fn(regs);
413
414 nmi_ipi_lock();
415 nmi_ipi_busy_count--;
416out:
417 nmi_ipi_unlock_end(&flags);
418
419 return ret;
420}
421
422static void do_smp_send_nmi_ipi(int cpu)
423{
424 if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
425 return;
426
427 if (cpu >= 0) {
428 do_message_pass(cpu, PPC_MSG_NMI_IPI);
429 } else {
430 int c;
431
432 for_each_online_cpu(c) {
433 if (c == raw_smp_processor_id())
434 continue;
435 do_message_pass(c, PPC_MSG_NMI_IPI);
436 }
437 }
438}
439
440void smp_flush_nmi_ipi(u64 delay_us)
441{
442 unsigned long flags;
443
444 nmi_ipi_lock_start(&flags);
445 while (nmi_ipi_busy_count) {
446 nmi_ipi_unlock_end(&flags);
447 udelay(1);
448 if (delay_us) {
449 delay_us--;
450 if (!delay_us)
451 return;
452 }
453 nmi_ipi_lock_start(&flags);
454 }
455 nmi_ipi_unlock_end(&flags);
456}
457
458/*
459 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
460 * - fn is the target callback function.
461 * - delay_us > 0 is the delay before giving up waiting for targets to
462 * enter the handler, == 0 specifies indefinite delay.
463 */
464int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
465{
466 unsigned long flags;
467 int me = raw_smp_processor_id();
468 int ret = 1;
469
470 BUG_ON(cpu == me);
471 BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
472
473 if (unlikely(!smp_ops))
474 return 0;
475
476 /* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
477 nmi_ipi_lock_start(&flags);
478 while (nmi_ipi_busy_count) {
479 nmi_ipi_unlock_end(&flags);
480 spin_until_cond(nmi_ipi_busy_count == 0);
481 nmi_ipi_lock_start(&flags);
482 }
483
484 nmi_ipi_function = fn;
485
486 if (cpu < 0) {
487 /* ALL_OTHERS */
488 cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
489 cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
490 } else {
491 /* cpumask starts clear */
492 cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
493 }
494 nmi_ipi_busy_count++;
495 nmi_ipi_unlock();
496
497 do_smp_send_nmi_ipi(cpu);
498
499 while (!cpumask_empty(&nmi_ipi_pending_mask)) {
500 udelay(1);
501 if (delay_us) {
502 delay_us--;
503 if (!delay_us)
504 break;
505 }
506 }
507
508 nmi_ipi_lock();
509 if (!cpumask_empty(&nmi_ipi_pending_mask)) {
510 /* Could not gather all CPUs */
511 ret = 0;
512 cpumask_clear(&nmi_ipi_pending_mask);
513 }
514 nmi_ipi_busy_count--;
515 nmi_ipi_unlock_end(&flags);
516
517 return ret;
518}
519#endif /* CONFIG_NMI_IPI */
520
521#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
522void tick_broadcast(const struct cpumask *mask)
523{
524 unsigned int cpu;
525
526 for_each_cpu(cpu, mask)
527 do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
528}
529#endif
530
531#ifdef CONFIG_DEBUGGER
532void debugger_ipi_callback(struct pt_regs *regs)
533{
534 debugger_ipi(regs);
535}
536
537void smp_send_debugger_break(void)
538{
539 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
540}
541#endif
542
543#ifdef CONFIG_KEXEC_CORE
544void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
545{
546 int cpu;
547
548 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
549 if (kdump_in_progress() && crash_wake_offline) {
550 for_each_present_cpu(cpu) {
551 if (cpu_online(cpu))
552 continue;
553 /*
554 * crash_ipi_callback will wait for
555 * all cpus, including offline CPUs.
556 * We don't care about nmi_ipi_function.
557 * Offline cpus will jump straight into
558 * crash_ipi_callback, we can skip the
559 * entire NMI dance and waiting for
560 * cpus to clear pending mask, etc.
561 */
562 do_smp_send_nmi_ipi(cpu);
563 }
564 }
565}
566#endif
567
568#ifdef CONFIG_NMI_IPI
569static void nmi_stop_this_cpu(struct pt_regs *regs)
570{
571 /*
572 * This is a special case because it never returns, so the NMI IPI
573 * handling would never mark it as done, which makes any later
574 * smp_send_nmi_ipi() call spin forever. Mark it done now.
575 *
576 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
577 */
578 nmi_ipi_lock();
579 nmi_ipi_busy_count--;
580 nmi_ipi_unlock();
581
582 /* Remove this CPU */
583 set_cpu_online(smp_processor_id(), false);
584
585 spin_begin();
586 while (1)
587 spin_cpu_relax();
588}
589
590void smp_send_stop(void)
591{
592 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
593}
594
595#else /* CONFIG_NMI_IPI */
596
597static void stop_this_cpu(void *dummy)
598{
599 /* Remove this CPU */
600 set_cpu_online(smp_processor_id(), false);
601
602 hard_irq_disable();
603 spin_begin();
604 while (1)
605 spin_cpu_relax();
606}
607
608void smp_send_stop(void)
609{
610 static bool stopped = false;
611
612 /*
613 * Prevent waiting on csd lock from a previous smp_send_stop.
614 * This is racy, but in general callers try to do the right
615 * thing and only fire off one smp_send_stop (e.g., see
616 * kernel/panic.c)
617 */
618 if (stopped)
619 return;
620
621 stopped = true;
622
623 smp_call_function(stop_this_cpu, NULL, 0);
624}
625#endif /* CONFIG_NMI_IPI */
626
627struct thread_info *current_set[NR_CPUS];
628
629static void smp_store_cpu_info(int id)
630{
631 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
632#ifdef CONFIG_PPC_FSL_BOOK3E
633 per_cpu(next_tlbcam_idx, id)
634 = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
635#endif
636}
637
638/*
639 * Relationships between CPUs are maintained in a set of per-cpu cpumasks so
640 * rather than just passing around the cpumask we pass around a function that
641 * returns the that cpumask for the given CPU.
642 */
643static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
644{
645 cpumask_set_cpu(i, get_cpumask(j));
646 cpumask_set_cpu(j, get_cpumask(i));
647}
648
649#ifdef CONFIG_HOTPLUG_CPU
650static void set_cpus_unrelated(int i, int j,
651 struct cpumask *(*get_cpumask)(int))
652{
653 cpumask_clear_cpu(i, get_cpumask(j));
654 cpumask_clear_cpu(j, get_cpumask(i));
655}
656#endif
657
658void __init smp_prepare_cpus(unsigned int max_cpus)
659{
660 unsigned int cpu;
661
662 DBG("smp_prepare_cpus\n");
663
664 /*
665 * setup_cpu may need to be called on the boot cpu. We havent
666 * spun any cpus up but lets be paranoid.
667 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	/* Init the cpumasks so the boot CPU is related to itself */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca_ptrs[boot_cpuid]->__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt to
	 * be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

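/*
 * Wait for the dying CPU to mark itself dead, polling for up to
 * ~10 seconds (100 iterations of 100ms).
 */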
void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * and the wait loop in generic_cpu_die() would never actually wait.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

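/*
 * Hook the chosen idle task up as the secondary's initial thread: point the
 * paca (on ppc64) and secondary_ti at its thread_info/stack so the new CPU
 * starts with a valid current and kernel stack.
 */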
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca_ptrs[cpu]->__current = idle;
	paca_ptrs[cpu]->kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

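/*
 * Bring a secondary CPU up: set up its idle thread, let the platform
 * prepare and kick it, then wait for it to call in and show up in the
 * online mask.
 */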
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (it can be left over from a
	 * previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (i.e. is actually up).
	 * Use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

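/*
 * Mark @cpu related (via @mask_fn) to every online CPU that shares its L2
 * cache device-tree node. Returns false if no cache node could be found.
 */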
static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
{
	struct device_node *l2_cache, *np;
	int i;

	l2_cache = cpu_to_l2cache(cpu);
	if (!l2_cache)
		return false;

	for_each_cpu(i, cpu_online_mask) {
		/*
		 * When updating the masks the current CPU has not yet been
		 * marked online, but we still need to update its cache masks.
		 */
		np = cpu_to_l2cache(i);
		if (!np)
			continue;

		if (np == l2_cache)
			set_cpus_related(cpu, i, mask_fn);

		of_node_put(np);
	}
	of_node_put(l2_cache);

	return true;
}

#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
	int i;

	/* NB: cpu_core_mask is a superset of the others */
	for_each_cpu(i, cpu_core_mask(cpu)) {
		set_cpus_unrelated(cpu, i, cpu_core_mask);
		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
	}
}
#endif

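/*
 * Rebuild the thread sibling, L2-cache and core masks for a CPU that is
 * coming online.
 */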
static void add_cpu_to_masks(int cpu)
{
	int first_thread = cpu_first_thread_sibling(cpu);
	int chipid = cpu_to_chip_id(cpu);
	int i;

	/*
	 * This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
	 */
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	/*
	 * Copy the thread sibling mask into the cache sibling mask
	 * and mark any CPUs that share an L2 with this CPU.
	 */
	for_each_cpu(i, cpu_sibling_mask(cpu))
		set_cpus_related(cpu, i, cpu_l2_cache_mask);
	update_mask_by_l2(cpu, cpu_l2_cache_mask);

	/*
	 * Copy the cache sibling mask into core sibling mask and mark
	 * any CPUs on the same chip as this CPU.
	 */
	for_each_cpu(i, cpu_l2_cache_mask(cpu))
		set_cpus_related(cpu, i, cpu_core_mask);

	if (chipid == -1)
		return;

	for_each_cpu(i, cpu_online_mask)
		if (cpu_to_chip_id(i) == chipid)
			set_cpus_related(cpu, i, cpu_core_mask);
}

static bool shared_caches;

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!cpumask_equal(cpu_l2_cache_mask(cpu), cpu_sibling_mask(cpu)))
		shared_caches = true;

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
 * since the migrated task remains cache hot. We want to take advantage of this
 * at the scheduler level so an extra topology level is required.
 */
static int powerpc_shared_cache_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it returns a
 * non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
	return cpu_l2_cache_mask(cpu);
}

static struct sched_domain_topology_level power9_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	/*
	 * If any CPU detects that it's sharing a cache with another CPU then
	 * use the deeper topology that is aware of this sharing.
	 */
	if (shared_caches) {
		pr_info("Using shared cache scheduler topology\n");
		set_sched_topology(power9_topology);
	} else {
		pr_info("Using standard scheduler topology\n");
		set_sched_topology(powerpc_topology);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	remove_cpu_from_masks(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

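/*
 * Final step of offlining, run on the CPU going offline: hand control to
 * the platform's cpu_die hook; if that returns, resume in start_secondary.
 */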
void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif