v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * SMP support for ppc.
   4 *
   5 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
   6 * deal of code from the sparc and intel versions.
   7 *
   8 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
   9 *
  10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
  11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
  12 */
  13
  14#undef DEBUG
  15
  16#include <linux/kernel.h>
  17#include <linux/export.h>
  18#include <linux/sched/mm.h>
  19#include <linux/sched/task_stack.h>
  20#include <linux/sched/topology.h>
  21#include <linux/smp.h>
  22#include <linux/interrupt.h>
  23#include <linux/delay.h>
  24#include <linux/init.h>
  25#include <linux/spinlock.h>
  26#include <linux/cache.h>
  27#include <linux/err.h>
  28#include <linux/device.h>
  29#include <linux/cpu.h>
  30#include <linux/notifier.h>
  31#include <linux/topology.h>
  32#include <linux/profile.h>
  33#include <linux/processor.h>
  34#include <linux/random.h>
  35#include <linux/stackprotector.h>
  36#include <linux/pgtable.h>
  37#include <linux/clockchips.h>
  38#include <linux/kexec.h>
  39
  40#include <asm/ptrace.h>
  41#include <linux/atomic.h>
  42#include <asm/irq.h>
  43#include <asm/hw_irq.h>
  44#include <asm/kvm_ppc.h>
  45#include <asm/dbell.h>
  46#include <asm/page.h>
  47#include <asm/smp.h>
  48#include <asm/time.h>
  49#include <asm/machdep.h>
  50#include <asm/mmu_context.h>
  51#include <asm/cputhreads.h>
  52#include <asm/cputable.h>
  53#include <asm/mpic.h>
  54#include <asm/vdso_datapage.h>
  55#ifdef CONFIG_PPC64
  56#include <asm/paca.h>
  57#endif
  58#include <asm/vdso.h>
  59#include <asm/debug.h>
  60#include <asm/cpu_has_feature.h>
  61#include <asm/ftrace.h>
  62#include <asm/kup.h>
  63#include <asm/fadump.h>
  64
  65#include <trace/events/ipi.h>
  66
  67#ifdef DEBUG
  68#include <asm/udbg.h>
  69#define DBG(fmt...) udbg_printf(fmt)
  70#else
  71#define DBG(fmt...)
  72#endif
  73
  74#ifdef CONFIG_HOTPLUG_CPU
  75/* State of each CPU during hotplug phases */
  76static DEFINE_PER_CPU(int, cpu_state) = { 0 };
  77#endif
  78
  79struct task_struct *secondary_current;
  80bool has_big_cores __ro_after_init;
  81bool coregroup_enabled __ro_after_init;
  82bool thread_group_shares_l2 __ro_after_init;
  83bool thread_group_shares_l3 __ro_after_init;
  84
  85DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
  86DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
  87DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
  88DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
  89static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
  90
  91EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  92EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
  93EXPORT_PER_CPU_SYMBOL(cpu_core_map);
  94EXPORT_SYMBOL_GPL(has_big_cores);
  95
  96#define MAX_THREAD_LIST_SIZE	8
  97#define THREAD_GROUP_SHARE_L1   1
  98#define THREAD_GROUP_SHARE_L2_L3 2
  99struct thread_groups {
 100	unsigned int property;
 101	unsigned int nr_groups;
 102	unsigned int threads_per_group;
 103	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
 104};
 105
 106/* Maximum number of properties that groups of threads within a core can share */
 107#define MAX_THREAD_GROUP_PROPERTIES 2
 108
 109struct thread_groups_list {
 110	unsigned int nr_properties;
 111	struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
 112};
 113
 114static struct thread_groups_list tgl[NR_CPUS] __initdata;
 115/*
 116 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
 117 * the set of its siblings that share the L1-cache.
 118 */
 119DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
 120
 121/*
 122 * On some big-core systems, thread_group_l2_cache_map for each CPU
 123 * corresponds to the set of its siblings within the core that share the
 124 * L2-cache.
 125 */
 126DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
 127
 128/*
 129 * On P10, thread_group_l3_cache_map for each CPU is equal to the
 130 * thread_group_l2_cache_map
 131 */
 132DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
 133
 134/* SMP operations for this machine */
 135struct smp_ops_t *smp_ops;
 136
 137/* Can't be static due to PowerMac hackery */
 138volatile unsigned int cpu_callin_map[NR_CPUS];
 139
 140int smt_enabled_at_boot = 1;
 141
 142/*
 143 * Returns 1 if the specified cpu should be brought up during boot.
 144 * Used to inhibit booting threads if they've been disabled or
 145 * limited on the command line
 146 */
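/*
 * Illustration (smt_enabled_at_boot is set via the ppc64 smt-enabled=
 * command line option): a value of 2 means only threads 0 and 1 of each
 * core are started at boot, and a value of 0 means only thread 0 comes up.
 */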
 147int smp_generic_cpu_bootable(unsigned int nr)
 148{
 149	/* Special case - we inhibit secondary thread startup
 150	 * during boot if the user requests it.
 151	 */
 152	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
 153		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
 154			return 0;
 155		if (smt_enabled_at_boot
 156		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
 157			return 0;
 158	}
 159
 160	return 1;
 161}
 162
 163
 164#ifdef CONFIG_PPC64
 165int smp_generic_kick_cpu(int nr)
 166{
 167	if (nr < 0 || nr >= nr_cpu_ids)
 168		return -EINVAL;
 169
 170	/*
 171	 * The processor is currently spinning, waiting for the
 172	 * cpu_start field to become non-zero. After we set cpu_start,
 173	 * the processor will continue on to secondary_start.
 174	 */
 175	if (!paca_ptrs[nr]->cpu_start) {
 176		paca_ptrs[nr]->cpu_start = 1;
 177		smp_mb();
 178		return 0;
 179	}
 180
 181#ifdef CONFIG_HOTPLUG_CPU
 182	/*
 183	 * Ok it's not there, so it might be soft-unplugged, let's
 184	 * try to bring it back
 185	 */
 186	generic_set_cpu_up(nr);
 187	smp_wmb();
 188	smp_send_reschedule(nr);
 189#endif /* CONFIG_HOTPLUG_CPU */
 190
 191	return 0;
 192}
 193#endif /* CONFIG_PPC64 */
 194
 195static irqreturn_t call_function_action(int irq, void *data)
 196{
 197	generic_smp_call_function_interrupt();
 198	return IRQ_HANDLED;
 199}
 200
 201static irqreturn_t reschedule_action(int irq, void *data)
 202{
 203	scheduler_ipi();
 204	return IRQ_HANDLED;
 205}
 206
 207#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 208static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
 209{
 210	timer_broadcast_interrupt();
 211	return IRQ_HANDLED;
 212}
 213#endif
 214
 215#ifdef CONFIG_NMI_IPI
 216static irqreturn_t nmi_ipi_action(int irq, void *data)
 217{
 218	smp_handle_nmi_ipi(get_irq_regs());
 219	return IRQ_HANDLED;
 220}
 221#endif
 222
 223static irq_handler_t smp_ipi_action[] = {
 224	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
 225	[PPC_MSG_RESCHEDULE] = reschedule_action,
 226#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 227	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
 228#endif
 229#ifdef CONFIG_NMI_IPI
 230	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
 231#endif
 232};
 233
 234/*
 235 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 236 * than going through the call function infrastructure, and strongly
 237 * serialized, so it is more appropriate for debugging.
 238 */
 239const char *smp_ipi_name[] = {
 240	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
 241	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
 242#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 243	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
 244#endif
 245#ifdef CONFIG_NMI_IPI
 246	[PPC_MSG_NMI_IPI] = "nmi ipi",
 247#endif
 248};
 249
 250/* optional function to request ipi, for controllers with >= 4 ipis */
 251int smp_request_message_ipi(int virq, int msg)
 252{
 253	int err;
 254
 255	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
 256		return -EINVAL;
 257#ifndef CONFIG_NMI_IPI
 258	if (msg == PPC_MSG_NMI_IPI)
 259		return 1;
 260#endif
 261
 262	err = request_irq(virq, smp_ipi_action[msg],
 263			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
 264			  smp_ipi_name[msg], NULL);
 265	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
 266		virq, smp_ipi_name[msg], err);
 267
 268	return err;
 269}
 270
 271#ifdef CONFIG_PPC_SMP_MUXED_IPI
 272struct cpu_messages {
 273	long messages;			/* current messages */
 274};
 275static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
 276
 277void smp_muxed_ipi_set_message(int cpu, int msg)
 278{
 279	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 280	char *message = (char *)&info->messages;
 281
 282	/*
 283	 * Order previous accesses before accesses in the IPI handler.
 284	 */
 285	smp_mb();
 286	WRITE_ONCE(message[msg], 1);
 287}
 288
 289void smp_muxed_ipi_message_pass(int cpu, int msg)
 290{
 291	smp_muxed_ipi_set_message(cpu, msg);
 292
 293	/*
 294	 * cause_ipi functions are required to include a full barrier
 295	 * before doing whatever causes the IPI.
 296	 */
 297	smp_ops->cause_ipi(cpu);
 298}
 299
 300#ifdef __BIG_ENDIAN__
 301#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
 302#else
 303#define IPI_MESSAGE(A) (1uL << (8 * (A)))
 304#endif
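/*
 * Illustration: each message type owns one byte of the per-CPU 'messages'
 * word, so smp_muxed_ipi_set_message() can set it with a plain byte store
 * and smp_ipi_demux_relaxed() can test it against IPI_MESSAGE(msg). E.g. on
 * 64-bit big-endian, PPC_MSG_CALL_FUNCTION (0) is byte 0, the most
 * significant byte, hence 1UL << 56; on little-endian it is 1UL << 0.
 */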
 305
 306irqreturn_t smp_ipi_demux(void)
 307{
 308	mb();	/* order any irq clear */
 309
 310	return smp_ipi_demux_relaxed();
 311}
 312
 313/* sync-free variant. Callers should ensure synchronization */
 314irqreturn_t smp_ipi_demux_relaxed(void)
 315{
 316	struct cpu_messages *info;
 317	unsigned long all;
 318
 319	info = this_cpu_ptr(&ipi_message);
 320	do {
 321		all = xchg(&info->messages, 0);
 322#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
 323		/*
 324		 * Must check for PPC_MSG_RM_HOST_ACTION messages
 325		 * before PPC_MSG_CALL_FUNCTION messages because when
 326		 * a VM is destroyed, we call kick_all_cpus_sync()
 327		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
 328		 * messages have completed before we free any VCPUs.
 329		 */
 330		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
 331			kvmppc_xics_ipi_action();
 332#endif
 333		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
 334			generic_smp_call_function_interrupt();
 335		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
 336			scheduler_ipi();
 337#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 338		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
 339			timer_broadcast_interrupt();
 340#endif
 341#ifdef CONFIG_NMI_IPI
 342		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
 343			nmi_ipi_action(0, NULL);
 344#endif
 345	} while (READ_ONCE(info->messages));
 346
 347	return IRQ_HANDLED;
 348}
 349#endif /* CONFIG_PPC_SMP_MUXED_IPI */
 350
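/*
 * Deliver a single IPI message: platforms that implement ->message_pass get
 * one interrupt per message type; otherwise the message is encoded into the
 * per-CPU byte map above and a single muxed IPI is raised via ->cause_ipi.
 */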
 351static inline void do_message_pass(int cpu, int msg)
 352{
 353	if (smp_ops->message_pass)
 354		smp_ops->message_pass(cpu, msg);
 355#ifdef CONFIG_PPC_SMP_MUXED_IPI
 356	else
 357		smp_muxed_ipi_message_pass(cpu, msg);
 358#endif
 359}
 360
 361void arch_smp_send_reschedule(int cpu)
 362{
 363	if (likely(smp_ops))
 364		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 365}
 366EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
 367
 368void arch_send_call_function_single_ipi(int cpu)
 369{
 370	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 371}
 372
 373void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 374{
 375	unsigned int cpu;
 376
 377	for_each_cpu(cpu, mask)
 378		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 379}
 380
 381#ifdef CONFIG_NMI_IPI
 382
 383/*
 384 * "NMI IPI" system.
 385 *
 386 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 387 * a running system. They can be used for crash, debug, halt/reboot, etc.
 388 *
 389 * The IPI call waits with interrupts disabled until all targets enter the
 390 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 391 * have returned from their handlers, so there is no guarantee about
 392 * concurrency or re-entrancy.
 393 *
 394 * A new NMI can be issued before all targets exit the handler.
 395 *
 396 * The IPI call may time out without all targets entering the NMI handler.
 397 * In that case, there is some logic to recover (and ignore subsequent
 398 * NMI interrupts that may eventually be raised), but the platform interrupt
 399 * handler may not be able to distinguish this from other exception causes,
 400 * which may cause a crash.
 401 */
 402
 403static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
 404static struct cpumask nmi_ipi_pending_mask;
 405static bool nmi_ipi_busy = false;
 406static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
 407
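/*
 * The NMI IPI state above is serialized by __nmi_ipi_lock, a hand-rolled
 * cmpxchg spinlock taken with interrupts hard-disabled (nmi_ipi_lock_start()).
 * It is also taken from the NMI path itself in smp_handle_nmi_ipi().
 */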
 408noinstr static void nmi_ipi_lock_start(unsigned long *flags)
 409{
 410	raw_local_irq_save(*flags);
 411	hard_irq_disable();
 412	while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 413		raw_local_irq_restore(*flags);
 414		spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
 415		raw_local_irq_save(*flags);
 416		hard_irq_disable();
 417	}
 418}
 419
 420noinstr static void nmi_ipi_lock(void)
 421{
 422	while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
 423		spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
 424}
 425
 426noinstr static void nmi_ipi_unlock(void)
 427{
 428	smp_mb();
 429	WARN_ON(raw_atomic_read(&__nmi_ipi_lock) != 1);
 430	raw_atomic_set(&__nmi_ipi_lock, 0);
 431}
 432
 433noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
 434{
 435	nmi_ipi_unlock();
 436	raw_local_irq_restore(*flags);
 437}
 438
 439/*
 440 * Platform NMI handler calls this to ack
 441 */
 442noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
 443{
 444	void (*fn)(struct pt_regs *) = NULL;
 445	unsigned long flags;
 446	int me = raw_smp_processor_id();
 447	int ret = 0;
 448
 449	/*
 450	 * Unexpected NMIs are possible here because the interrupt may not
 451	 * be able to distinguish NMI IPIs from other types of NMIs, or
 452	 * because the caller may have timed out.
 453	 */
 454	nmi_ipi_lock_start(&flags);
 455	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
 456		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 457		fn = READ_ONCE(nmi_ipi_function);
 458		WARN_ON_ONCE(!fn);
 459		ret = 1;
 460	}
 461	nmi_ipi_unlock_end(&flags);
 462
 463	if (fn)
 464		fn(regs);
 465
 466	return ret;
 467}
 468
 469static void do_smp_send_nmi_ipi(int cpu, bool safe)
 470{
 471	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
 472		return;
 473
 474	if (cpu >= 0) {
 475		do_message_pass(cpu, PPC_MSG_NMI_IPI);
 476	} else {
 477		int c;
 478
 479		for_each_online_cpu(c) {
 480			if (c == raw_smp_processor_id())
 481				continue;
 482			do_message_pass(c, PPC_MSG_NMI_IPI);
 483		}
 484	}
 485}
 486
 487/*
 488 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 489 * - fn is the target callback function.
 490 * - delay_us > 0 is the delay before giving up waiting for targets to
 491 *   begin executing the handler, == 0 specifies indefinite delay.
 492 */
 493static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
 494				u64 delay_us, bool safe)
 495{
 496	unsigned long flags;
 497	int me = raw_smp_processor_id();
 498	int ret = 1;
 499
 500	BUG_ON(cpu == me);
 501	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
 502
 503	if (unlikely(!smp_ops))
 504		return 0;
 505
 506	nmi_ipi_lock_start(&flags);
 507	while (nmi_ipi_busy) {
 508		nmi_ipi_unlock_end(&flags);
 509		spin_until_cond(!nmi_ipi_busy);
 510		nmi_ipi_lock_start(&flags);
 511	}
 512	nmi_ipi_busy = true;
 513	nmi_ipi_function = fn;
 514
 515	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
 516
 517	if (cpu < 0) {
 518		/* ALL_OTHERS */
 519		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
 520		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 521	} else {
 522		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
 523	}
 524
 525	nmi_ipi_unlock();
 526
 527	/* Interrupts remain hard disabled */
 528
 529	do_smp_send_nmi_ipi(cpu, safe);
 530
 531	nmi_ipi_lock();
 532	/* nmi_ipi_busy is set here, so unlock/lock is okay */
 533	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
 534		nmi_ipi_unlock();
 535		udelay(1);
 536		nmi_ipi_lock();
 537		if (delay_us) {
 538			delay_us--;
 539			if (!delay_us)
 540				break;
 541		}
 542	}
 543
 544	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
 545		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
 546		ret = 0;
 547		cpumask_clear(&nmi_ipi_pending_mask);
 548	}
 549
 550	nmi_ipi_function = NULL;
 551	nmi_ipi_busy = false;
 552
 553	nmi_ipi_unlock_end(&flags);
 554
 555	return ret;
 556}
 557
 558int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 559{
 560	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
 561}
 562
 563int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 564{
 565	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
 566}
 567#endif /* CONFIG_NMI_IPI */
 568
 569#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 570void tick_broadcast(const struct cpumask *mask)
 571{
 572	unsigned int cpu;
 573
 574	for_each_cpu(cpu, mask)
 575		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
 576}
 577#endif
 578
 579#ifdef CONFIG_DEBUGGER
 580static void debugger_ipi_callback(struct pt_regs *regs)
 581{
 582	debugger_ipi(regs);
 583}
 584
 585void smp_send_debugger_break(void)
 586{
 587	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
 588}
 589#endif
 590
 591#ifdef CONFIG_KEXEC_CORE
 592void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 593{
 594	int cpu;
 595
 596	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
 597	if (kdump_in_progress() && crash_wake_offline) {
 598		for_each_present_cpu(cpu) {
 599			if (cpu_online(cpu))
 600				continue;
 601			/*
 602			 * crash_ipi_callback will wait for
 603			 * all cpus, including offline CPUs.
 604			 * We don't care about nmi_ipi_function.
 605			 * Offline cpus will jump straight into
 606			 * crash_ipi_callback, we can skip the
 607			 * entire NMI dance and waiting for
 608			 * cpus to clear pending mask, etc.
 609			 */
 610			do_smp_send_nmi_ipi(cpu, false);
 611		}
 612	}
 613}
 614#endif
 615
 616void crash_smp_send_stop(void)
 617{
 618	static bool stopped = false;
 619
 620	/*
 621	 * In case of fadump, register data for all CPUs is captured by f/w
 622	 * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before
 623	 * this rtas call to avoid tricky post processing of those CPUs'
 624	 * backtraces.
 625	 */
 626	if (should_fadump_crash())
 627		return;
 628
 629	if (stopped)
 630		return;
 631
 632	stopped = true;
 633
 634#ifdef CONFIG_KEXEC_CORE
 635	if (kexec_crash_image) {
 636		crash_kexec_prepare();
 637		return;
 638	}
 639#endif
 640
 641	smp_send_stop();
 642}
 643
 644#ifdef CONFIG_NMI_IPI
 645static void nmi_stop_this_cpu(struct pt_regs *regs)
 646{
 647	/*
 648	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
 649	 */
 650	set_cpu_online(smp_processor_id(), false);
 651
 652	spin_begin();
 653	while (1)
 654		spin_cpu_relax();
 655}
 656
 657void smp_send_stop(void)
 658{
 659	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
 660}
 661
 662#else /* CONFIG_NMI_IPI */
 663
 664static void stop_this_cpu(void *dummy)
 665{
 666	hard_irq_disable();
 667
 668	/*
 669	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings,
 670	 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
 671	 * to know other CPUs are offline before it breaks locks to flush
 672	 * printk buffers, in case we panic()ed while holding the lock.
 673	 */
 674	set_cpu_online(smp_processor_id(), false);
 675
 676	spin_begin();
 677	while (1)
 678		spin_cpu_relax();
 679}
 680
 681void smp_send_stop(void)
 682{
 683	static bool stopped = false;
 684
 685	/*
 686	 * Prevent waiting on csd lock from a previous smp_send_stop.
 687	 * This is racy, but in general callers try to do the right
 688	 * thing and only fire off one smp_send_stop (e.g., see
 689	 * kernel/panic.c)
 690	 */
 691	if (stopped)
 692		return;
 693
 694	stopped = true;
 695
 696	smp_call_function(stop_this_cpu, NULL, 0);
 697}
 698#endif /* CONFIG_NMI_IPI */
 699
 700static struct task_struct *current_set[NR_CPUS];
 701
 702static void smp_store_cpu_info(int id)
 703{
 704	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 705#ifdef CONFIG_PPC_E500
 706	per_cpu(next_tlbcam_idx, id)
 707		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
 708#endif
 709}
 710
 711/*
 712 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
 713 * rather than just passing around the cpumask we pass around a function that
 714 * returns that cpumask for the given CPU.
 715 */
 716static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
 717{
 718	cpumask_set_cpu(i, get_cpumask(j));
 719	cpumask_set_cpu(j, get_cpumask(i));
 720}
 721
 722#ifdef CONFIG_HOTPLUG_CPU
 723static void set_cpus_unrelated(int i, int j,
 724		struct cpumask *(*get_cpumask)(int))
 725{
 726	cpumask_clear_cpu(i, get_cpumask(j));
 727	cpumask_clear_cpu(j, get_cpumask(i));
 728}
 729#endif
 730
 731/*
 732 * Extends set_cpus_related. Instead of setting one CPU at a time in
 733 * dstmask, set srcmask in one shot. dstmask should be a superset of srcmask.
 734 */
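/*
 * E.g. or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask) ORs the whole
 * submask of @cpu into the core mask of every CPU in that submask in one
 * pass, rather than relating CPUs pairwise with set_cpus_related().
 */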
 735static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
 736				struct cpumask *(*dstmask)(int))
 737{
 738	struct cpumask *mask;
 739	int k;
 740
 741	mask = srcmask(j);
 742	for_each_cpu(k, srcmask(i))
 743		cpumask_or(dstmask(k), dstmask(k), mask);
 744
 745	if (i == j)
 746		return;
 747
 748	mask = srcmask(i);
 749	for_each_cpu(k, srcmask(j))
 750		cpumask_or(dstmask(k), dstmask(k), mask);
 751}
 752
 753/*
 754 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 755 *                      property for the CPU device node @dn and stores
 756 *                      the parsed output in the thread_groups_list
 757 *                      structure @tglp.
 758 *
 759 * @dn: The device node of the CPU device.
 760 * @tglp: Pointer to a thread group list structure into which the parsed
 761 *      output of "ibm,thread-groups" is stored.
 762 *
 763 * ibm,thread-groups[0..N-1] array defines which group of threads in
 764 * the CPU-device node can be grouped together based on the property.
 765 *
 766 * This array can represent thread groupings for multiple properties.
 767 *
 768 * ibm,thread-groups[i + 0] tells us the property based on which the
 769 * threads are being grouped together. If this value is 1, it implies
 770 * that the threads in the same group share the L1 cache and translation cache. If
 771 * the value is 2, it implies that the threads in the same group share
 772 * the same L2 cache.
 773 *
 774 * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
 775 * property ibm,thread-groups[i]
 776 *
 777 * ibm,thread-groups[i+2] tells us the number of threads in each such
 778 * group.
 779 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
 780 *
 781 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
 782 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 783 * the grouping.
 784 *
 785 * Example:
 786 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
 787 * This can be decomposed into two consecutive arrays:
 788 * a) [1,2,4,8,10,12,14,9,11,13,15]
 789 * b) [2,2,4,8,10,12,14,9,11,13,15]
 790 *
 791 * wherein,
 792 *
 793 * a) provides information about Property "1" being shared by "2" groups,
 794 *  each with "4" threads. The "ibm,ppc-interrupt-server#s" of
 795 *  the first group is {8,10,12,14} and the
 796 *  "ibm,ppc-interrupt-server#s" of the second group is
 797 *  {9,11,13,15}. Property "1" is indicative of the threads in the
 798 *  group sharing L1 cache, translation cache and Instruction Data
 799 *  flow.
 800 *
 801 * b) provides information about Property "2" being shared by "2" groups,
 802 *  each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
 803 *  the first group is {8,10,12,14} and the
 804 *  "ibm,ppc-interrupt-server#s" of the second group is
 805 *  {9,11,13,15}. Property "2" indicates that the threads in each
 806 *  group share the L2-cache.
 807 *
 808 * Returns 0 on success, -EINVAL if the property does not exist,
 809 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 810 * property data isn't large enough.
 811 */
 812static int parse_thread_groups(struct device_node *dn,
 813			       struct thread_groups_list *tglp)
 814{
 815	unsigned int property_idx = 0;
 816	u32 *thread_group_array;
 817	size_t total_threads;
 818	int ret = 0, count;
 819	u32 *thread_list;
 820	int i = 0;
 821
 822	count = of_property_count_u32_elems(dn, "ibm,thread-groups");
 823	thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
 824	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
 825					 thread_group_array, count);
 826	if (ret)
 827		goto out_free;
 828
 829	while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
 830		int j;
 831		struct thread_groups *tg = &tglp->property_tgs[property_idx++];
 832
 833		tg->property = thread_group_array[i];
 834		tg->nr_groups = thread_group_array[i + 1];
 835		tg->threads_per_group = thread_group_array[i + 2];
 836		total_threads = tg->nr_groups * tg->threads_per_group;
 837
 838		thread_list = &thread_group_array[i + 3];
 839
 840		for (j = 0; j < total_threads; j++)
 841			tg->thread_list[j] = thread_list[j];
 842		i = i + 3 + total_threads;
 843	}
 844
 845	tglp->nr_properties = property_idx;
 846
 847out_free:
 848	kfree(thread_group_array);
 849	return ret;
 850}
 851
 852/*
 853 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 854 *                              that @cpu belongs to.
 855 *
 856 * @cpu : The logical CPU whose thread group is being searched.
 857 * @tg : The thread-group structure of the CPU node which @cpu belongs
 858 *       to.
 859 *
 860 * Returns the index to tg->thread_list that points to the start
 861 * of the thread_group that @cpu belongs to.
 862 *
 863 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 864 * tg->thread_list.
 865 */
 866static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
 867{
 868	int hw_cpu_id = get_hard_smp_processor_id(cpu);
 869	int i, j;
 870
 871	for (i = 0; i < tg->nr_groups; i++) {
 872		int group_start = i * tg->threads_per_group;
 873
 874		for (j = 0; j < tg->threads_per_group; j++) {
 875			int idx = group_start + j;
 876
 877			if (tg->thread_list[idx] == hw_cpu_id)
 878				return group_start;
 879		}
 880	}
 881
 882	return -1;
 883}
 884
 885static struct thread_groups *__init get_thread_groups(int cpu,
 886						      int group_property,
 887						      int *err)
 888{
 889	struct device_node *dn = of_get_cpu_node(cpu, NULL);
 890	struct thread_groups_list *cpu_tgl = &tgl[cpu];
 891	struct thread_groups *tg = NULL;
 892	int i;
 893	*err = 0;
 894
 895	if (!dn) {
 896		*err = -ENODATA;
 897		return NULL;
 898	}
 899
 900	if (!cpu_tgl->nr_properties) {
 901		*err = parse_thread_groups(dn, cpu_tgl);
 902		if (*err)
 903			goto out;
 904	}
 905
 906	for (i = 0; i < cpu_tgl->nr_properties; i++) {
 907		if (cpu_tgl->property_tgs[i].property == group_property) {
 908			tg = &cpu_tgl->property_tgs[i];
 909			break;
 910		}
 911	}
 912
 913	if (!tg)
 914		*err = -EINVAL;
 915out:
 916	of_node_put(dn);
 917	return tg;
 918}
 919
 920static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg,
 921					       int cpu, int cpu_group_start)
 922{
 923	int first_thread = cpu_first_thread_sibling(cpu);
 924	int i;
 925
 926	zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
 927
 928	for (i = first_thread; i < first_thread + threads_per_core; i++) {
 929		int i_group_start = get_cpu_thread_group_start(i, tg);
 930
 931		if (unlikely(i_group_start == -1)) {
 932			WARN_ON_ONCE(1);
 933			return -ENODATA;
 934		}
 935
 936		if (i_group_start == cpu_group_start)
 937			cpumask_set_cpu(i, *mask);
 938	}
 939
 940	return 0;
 941}
 942
 943static int __init init_thread_group_cache_map(int cpu, int cache_property)
 944
 945{
 946	int cpu_group_start = -1, err = 0;
 947	struct thread_groups *tg = NULL;
 948	cpumask_var_t *mask = NULL;
 949
 950	if (cache_property != THREAD_GROUP_SHARE_L1 &&
 951	    cache_property != THREAD_GROUP_SHARE_L2_L3)
 952		return -EINVAL;
 953
 954	tg = get_thread_groups(cpu, cache_property, &err);
 955
 956	if (!tg)
 957		return err;
 958
 959	cpu_group_start = get_cpu_thread_group_start(cpu, tg);
 960
 961	if (unlikely(cpu_group_start == -1)) {
 962		WARN_ON_ONCE(1);
 963		return -ENODATA;
 964	}
 965
 966	if (cache_property == THREAD_GROUP_SHARE_L1) {
 967		mask = &per_cpu(thread_group_l1_cache_map, cpu);
 968		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
 969	}
 970	else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
 971		mask = &per_cpu(thread_group_l2_cache_map, cpu);
 972		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
 973		mask = &per_cpu(thread_group_l3_cache_map, cpu);
 974		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
 975	}
 976
 977
 978	return 0;
 979}
 980
 981static bool shared_caches __ro_after_init;
 982
 983#ifdef CONFIG_SCHED_SMT
 984/* Flags for the SMT sched_domain level; adds packing for asymmetric SMT CPUs */
 985static int powerpc_smt_flags(void)
 986{
 987	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 988
 989	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
 990		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
 991		flags |= SD_ASYM_PACKING;
 992	}
 993	return flags;
 994}
 995#endif
 996
 997/*
 998 * On shared processor LPARs scheduled on a big core (which has two or more
 999 * independent thread groups per core), prefer lower numbered CPUs, so
1000 * that the workload consolidates onto a smaller number of cores.
1001 */
1002static __ro_after_init DEFINE_STATIC_KEY_FALSE(splpar_asym_pack);
1003
1004/*
1005 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1006 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1007 * since the migrated task remains cache hot. We want to take advantage of this
1008 * at the scheduler level so an extra topology level is required.
1009 */
1010static int powerpc_shared_cache_flags(void)
1011{
1012	if (static_branch_unlikely(&splpar_asym_pack))
1013		return SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING;
1014
1015	return SD_SHARE_PKG_RESOURCES;
1016}
1017
1018static int powerpc_shared_proc_flags(void)
1019{
1020	if (static_branch_unlikely(&splpar_asym_pack))
1021		return SD_ASYM_PACKING;
1022
1023	return 0;
1024}
1025
1026/*
1027 * We can't just pass cpu_l2_cache_mask() directly because
1028 * it returns a non-const pointer and the compiler barfs on that.
1029 */
1030static const struct cpumask *shared_cache_mask(int cpu)
1031{
1032	return per_cpu(cpu_l2_cache_map, cpu);
1033}
1034
1035#ifdef CONFIG_SCHED_SMT
1036static const struct cpumask *smallcore_smt_mask(int cpu)
1037{
1038	return cpu_smallcore_mask(cpu);
1039}
1040#endif
1041
1042static struct cpumask *cpu_coregroup_mask(int cpu)
1043{
1044	return per_cpu(cpu_coregroup_map, cpu);
1045}
1046
1047static bool has_coregroup_support(void)
1048{
1049	/* Coregroup identification not available on shared systems */
1050	if (is_shared_processor())
1051		return 0;
1052
1053	return coregroup_enabled;
1054}
1055
1056static const struct cpumask *cpu_mc_mask(int cpu)
1057{
1058	return cpu_coregroup_mask(cpu);
1059}
1060
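/*
 * Parse the "ibm,thread-groups" property for every possible CPU and fill in
 * the small-core (L1) and L2/L3 thread-group cache maps. has_big_cores is
 * only set once every CPU reports an L1 thread group; thread_group_shares_l2
 * and thread_group_shares_l3 only once every CPU reports an L2/L3 group.
 */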
1061static int __init init_big_cores(void)
1062{
1063	int cpu;
1064
1065	for_each_possible_cpu(cpu) {
1066		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
1067
1068		if (err)
1069			return err;
1070
1071		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
1072					GFP_KERNEL,
1073					cpu_to_node(cpu));
1074	}
1075
1076	has_big_cores = true;
1077
1078	for_each_possible_cpu(cpu) {
1079		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);
1080
1081		if (err)
1082			return err;
1083	}
1084
1085	thread_group_shares_l2 = true;
1086	thread_group_shares_l3 = true;
1087	pr_debug("L2/L3 cache only shared by the threads in the small core\n");
1088
1089	return 0;
1090}
1091
1092void __init smp_prepare_cpus(unsigned int max_cpus)
1093{
1094	unsigned int cpu, num_threads;
1095
1096	DBG("smp_prepare_cpus\n");
1097
1098	/* 
1099	 * setup_cpu may need to be called on the boot cpu. We haven't
1100	 * spun any cpus up, but let's be paranoid.
1101	 */
1102	BUG_ON(boot_cpuid != smp_processor_id());
1103
1104	/* Fixup boot cpu */
1105	smp_store_cpu_info(boot_cpuid);
1106	cpu_callin_map[boot_cpuid] = 1;
1107
1108	for_each_possible_cpu(cpu) {
1109		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
1110					GFP_KERNEL, cpu_to_node(cpu));
1111		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
1112					GFP_KERNEL, cpu_to_node(cpu));
1113		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
1114					GFP_KERNEL, cpu_to_node(cpu));
1115		if (has_coregroup_support())
1116			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
1117						GFP_KERNEL, cpu_to_node(cpu));
1118
1119#ifdef CONFIG_NUMA
1120		/*
1121		 * numa_node_id() works after this.
1122		 */
1123		if (cpu_present(cpu)) {
1124			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
1125			set_cpu_numa_mem(cpu,
1126				local_memory_node(numa_cpu_lookup_table[cpu]));
1127		}
1128#endif
1129	}
1130
1131	/* Init the cpumasks so the boot CPU is related to itself */
1132	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
1133	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
1134	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
1135
1136	if (has_coregroup_support())
1137		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
1138
1139	init_big_cores();
1140	if (has_big_cores) {
1141		cpumask_set_cpu(boot_cpuid,
1142				cpu_smallcore_mask(boot_cpuid));
1143	}
1144
1145	if (cpu_to_chip_id(boot_cpuid) != -1) {
1146		int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
1147
1148		/*
1149		 * All threads of a core share the same chip-id, so
1150		 * chip_id_lookup_table will have one entry per core.
1151		 * Assumption: if boot_cpuid doesn't have a chip-id, then no
1152		 * other CPU will have a chip-id either.
1153		 */
1154		chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
1155		if (chip_id_lookup_table)
1156			memset(chip_id_lookup_table, -1, sizeof(int) * idx);
1157	}
1158
1159	if (smp_ops && smp_ops->probe)
1160		smp_ops->probe();
1161
1162	// Initialise the generic SMT topology support
1163	num_threads = 1;
1164	if (smt_enabled_at_boot)
1165		num_threads = smt_enabled_at_boot;
1166	cpu_smt_set_num_threads(num_threads, threads_per_core);
1167}
1168
1169void smp_prepare_boot_cpu(void)
1170{
1171	BUG_ON(smp_processor_id() != boot_cpuid);
1172#ifdef CONFIG_PPC64
1173	paca_ptrs[boot_cpuid]->__current = current;
1174#endif
1175	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
1176	current_set[boot_cpuid] = current;
1177}
1178
1179#ifdef CONFIG_HOTPLUG_CPU
1180
1181int generic_cpu_disable(void)
1182{
1183	unsigned int cpu = smp_processor_id();
1184
1185	if (cpu == boot_cpuid)
1186		return -EBUSY;
1187
1188	set_cpu_online(cpu, false);
1189#ifdef CONFIG_PPC64
1190	vdso_data->processorCount--;
1191#endif
1192	/* Update affinity of all IRQs previously aimed at this CPU */
1193	irq_migrate_all_off_this_cpu();
1194
1195	/*
1196	 * Depending on the details of the interrupt controller, it's possible
1197	 * that one of the interrupts we just migrated away from this CPU is
1198	 * actually already pending on this CPU. If we leave it in that state
1199	 * the interrupt will never be EOI'ed, and will never fire again. So
1200	 * temporarily enable interrupts here, to allow any pending interrupt to
1201	 * be received (and EOI'ed), before we take this CPU offline.
1202	 */
1203	local_irq_enable();
1204	mdelay(1);
1205	local_irq_disable();
1206
1207	return 0;
1208}
1209
1210void generic_cpu_die(unsigned int cpu)
1211{
1212	int i;
1213
1214	for (i = 0; i < 100; i++) {
1215		smp_rmb();
1216		if (is_cpu_dead(cpu))
1217			return;
1218		msleep(100);
1219	}
1220	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
1221}
1222
1223void generic_set_cpu_dead(unsigned int cpu)
1224{
1225	per_cpu(cpu_state, cpu) = CPU_DEAD;
1226}
1227
1228/*
1229 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
1230 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
1231 * which makes the delay in generic_cpu_die() not happen.
1232 */
1233void generic_set_cpu_up(unsigned int cpu)
1234{
1235	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
1236}
1237
1238int generic_check_cpu_restart(unsigned int cpu)
1239{
1240	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
1241}
1242
1243int is_cpu_dead(unsigned int cpu)
1244{
1245	return per_cpu(cpu_state, cpu) == CPU_DEAD;
1246}
1247
1248static bool secondaries_inhibited(void)
1249{
1250	return kvm_hv_mode_active();
1251}
1252
1253#else /* HOTPLUG_CPU */
1254
1255#define secondaries_inhibited()		0
1256
1257#endif
1258
1259static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
1260{
1261#ifdef CONFIG_PPC64
1262	paca_ptrs[cpu]->__current = idle;
1263	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
1264				 THREAD_SIZE - STACK_FRAME_MIN_SIZE;
1265#endif
1266	task_thread_info(idle)->cpu = cpu;
1267	secondary_current = current_set[cpu] = idle;
1268}
1269
1270int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1271{
1272	const unsigned long boot_spin_ms = 5 * MSEC_PER_SEC;
1273	const bool booting = system_state < SYSTEM_RUNNING;
1274	const unsigned long hp_spin_ms = 1;
1275	unsigned long deadline;
1276	int rc;
1277	const unsigned long spin_wait_ms = booting ? boot_spin_ms : hp_spin_ms;
1278
1279	/*
1280	 * Don't allow secondary threads to come online if inhibited
1281	 */
1282	if (threads_per_core > 1 && secondaries_inhibited() &&
1283	    cpu_thread_in_subcore(cpu))
1284		return -EBUSY;
1285
1286	if (smp_ops == NULL ||
1287	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1288		return -EINVAL;
1289
1290	cpu_idle_thread_init(cpu, tidle);
1291
1292	/*
1293	 * The platform might need to allocate resources prior to bringing
1294	 * up the CPU
1295	 */
1296	if (smp_ops->prepare_cpu) {
1297		rc = smp_ops->prepare_cpu(cpu);
1298		if (rc)
1299			return rc;
1300	}
1301
1302	/* Make sure callin-map entry is 0 (can be left over from a CPU
1303	 * hotplug)
1304	 */
1305	cpu_callin_map[cpu] = 0;
1306
1307	/* The information for processor bringup must
1308	 * be written out to main store before we release
1309	 * the processor.
1310	 */
1311	smp_mb();
1312
1313	/* wake up cpus */
1314	DBG("smp: kicking cpu %d\n", cpu);
1315	rc = smp_ops->kick_cpu(cpu);
1316	if (rc) {
1317		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1318		return rc;
1319	}
1320
1321	/*
1322	 * At boot time, simply spin on the callin word until the
1323	 * deadline passes.
1324	 *
1325	 * At run time, spin for an optimistic amount of time to avoid
1326	 * sleeping in the common case.
1327	 */
1328	deadline = jiffies + msecs_to_jiffies(spin_wait_ms);
1329	spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline));
1330
1331	if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) {
1332		const unsigned long sleep_interval_us = 10 * USEC_PER_MSEC;
1333		const unsigned long sleep_wait_ms = 100 * MSEC_PER_SEC;
1334
1335		deadline = jiffies + msecs_to_jiffies(sleep_wait_ms);
1336		while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline))
1337			fsleep(sleep_interval_us);
1338	}
1339
1340	if (!cpu_callin_map[cpu]) {
1341		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1342		return -ENOENT;
1343	}
1344
1345	DBG("Processor %u found.\n", cpu);
1346
1347	if (smp_ops->give_timebase)
1348		smp_ops->give_timebase();
1349
1350	/* Wait until cpu puts itself in the online & active maps */
1351	spin_until_cond(cpu_online(cpu));
1352
1353	return 0;
1354}
1355
1356/* Return the value of the reg property corresponding to the given
1357 * logical cpu.
1358 */
1359int cpu_to_core_id(int cpu)
1360{
1361	struct device_node *np;
1362	int id = -1;
1363
1364	np = of_get_cpu_node(cpu, NULL);
1365	if (!np)
1366		goto out;
1367
1368	id = of_get_cpu_hwid(np, 0);
1369out:
1370	of_node_put(np);
1371	return id;
1372}
1373EXPORT_SYMBOL_GPL(cpu_to_core_id);
1374
1375/* Helper routines for cpu to core mapping */
1376int cpu_core_index_of_thread(int cpu)
1377{
1378	return cpu >> threads_shift;
1379}
1380EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1381
1382int cpu_first_thread_of_core(int core)
1383{
1384	return core << threads_shift;
1385}
1386EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
1387
1388/* Must be called when no change can occur to cpu_present_mask,
1389 * i.e. during cpu online or offline.
1390 */
1391static struct device_node *cpu_to_l2cache(int cpu)
1392{
1393	struct device_node *np;
1394	struct device_node *cache;
1395
1396	if (!cpu_present(cpu))
1397		return NULL;
1398
1399	np = of_get_cpu_node(cpu, NULL);
1400	if (np == NULL)
1401		return NULL;
1402
1403	cache = of_find_next_cache_node(np);
1404
1405	of_node_put(np);
1406
1407	return cache;
1408}
1409
1410static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
1411{
1412	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1413	struct device_node *l2_cache, *np;
1414	int i;
1415
1416	if (has_big_cores)
1417		submask_fn = cpu_smallcore_mask;
1418
1419	/*
1420	 * If the threads in a thread-group share L2 cache, then the
1421	 * L2-mask can be obtained from thread_group_l2_cache_map.
1422	 */
1423	if (thread_group_shares_l2) {
1424		cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
1425
1426		for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
1427			if (cpu_online(i))
1428				set_cpus_related(i, cpu, cpu_l2_cache_mask);
1429		}
1430
1431		/* Verify that L1-cache siblings are a subset of L2 cache-siblings */
1432		if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
1433		    !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
1434			pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
1435				     cpu);
1436		}
1437
1438		return true;
1439	}
1440
1441	l2_cache = cpu_to_l2cache(cpu);
1442	if (!l2_cache || !*mask) {
1443		/* Assume only core siblings share cache with this CPU */
1444		for_each_cpu(i, cpu_sibling_mask(cpu))
1445			set_cpus_related(cpu, i, cpu_l2_cache_mask);
1446
1447		return false;
1448	}
1449
1450	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1451
1452	/* Update l2-cache mask with all the CPUs that are part of submask */
1453	or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
1454
1455	/* Skip all CPUs already part of current CPU l2-cache mask */
1456	cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
1457
1458	for_each_cpu(i, *mask) {
1459		/*
1460		 * When updating the masks the current CPU has not yet been marked
1461		 * online, but we still need to update its cache masks.
1462		 */
1463		np = cpu_to_l2cache(i);
1464
1465		/* Skip all CPUs already part of current CPU l2-cache */
1466		if (np == l2_cache) {
1467			or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
1468			cpumask_andnot(*mask, *mask, submask_fn(i));
1469		} else {
1470			cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
1471		}
1472
1473		of_node_put(np);
1474	}
1475	of_node_put(l2_cache);
1476
1477	return true;
1478}
1479
1480#ifdef CONFIG_HOTPLUG_CPU
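/* Undo add_cpu_to_masks(): drop @cpu from all topology masks as it goes offline. */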
1481static void remove_cpu_from_masks(int cpu)
1482{
1483	struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
1484	int i;
1485
1486	unmap_cpu_from_node(cpu);
1487
1488	if (shared_caches)
1489		mask_fn = cpu_l2_cache_mask;
1490
1491	for_each_cpu(i, mask_fn(cpu)) {
1492		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1493		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1494		if (has_big_cores)
1495			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1496	}
1497
1498	for_each_cpu(i, cpu_core_mask(cpu))
1499		set_cpus_unrelated(cpu, i, cpu_core_mask);
1500
1501	if (has_coregroup_support()) {
1502		for_each_cpu(i, cpu_coregroup_mask(cpu))
1503			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
1504	}
1505}
1506#endif
1507
1508static inline void add_cpu_to_smallcore_masks(int cpu)
1509{
1510	int i;
1511
1512	if (!has_big_cores)
1513		return;
1514
1515	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1516
1517	for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
1518		if (cpu_online(i))
1519			set_cpus_related(i, cpu, cpu_smallcore_mask);
1520	}
1521}
1522
1523static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
1524{
1525	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1526	int coregroup_id = cpu_to_coregroup_id(cpu);
1527	int i;
1528
1529	if (shared_caches)
1530		submask_fn = cpu_l2_cache_mask;
1531
1532	if (!*mask) {
1533		/* Assume only siblings are part of this CPU's coregroup */
1534		for_each_cpu(i, submask_fn(cpu))
1535			set_cpus_related(cpu, i, cpu_coregroup_mask);
1536
1537		return;
1538	}
1539
1540	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1541
1542	/* Update coregroup mask with all the CPUs that are part of submask */
1543	or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
1544
1545	/* Skip all CPUs already part of coregroup mask */
1546	cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
1547
1548	for_each_cpu(i, *mask) {
1549		/* Skip all CPUs not part of this coregroup */
1550		if (coregroup_id == cpu_to_coregroup_id(i)) {
1551			or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
1552			cpumask_andnot(*mask, *mask, submask_fn(i));
1553		} else {
1554			cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
1555		}
1556	}
1557}
1558
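/*
 * Rebuild topology masks for a CPU coming online: thread siblings first, then
 * the small-core, L2 and coregroup masks, and finally the core mask, matching
 * other online CPUs by chip-id where chip_id_lookup_table is available.
 */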
1559static void add_cpu_to_masks(int cpu)
1560{
1561	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1562	int first_thread = cpu_first_thread_sibling(cpu);
1563	cpumask_var_t mask;
1564	int chip_id = -1;
1565	bool ret;
1566	int i;
1567
1568	/*
1569	 * This CPU will not be in the online mask yet so we need to manually
1570	 * add it to its own thread sibling mask.
1571	 */
1572	map_cpu_to_node(cpu, cpu_to_node(cpu));
1573	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1574	cpumask_set_cpu(cpu, cpu_core_mask(cpu));
1575
1576	for (i = first_thread; i < first_thread + threads_per_core; i++)
1577		if (cpu_online(i))
1578			set_cpus_related(i, cpu, cpu_sibling_mask);
1579
1580	add_cpu_to_smallcore_masks(cpu);
1581
1582	/* In CPU-hotplug path, hence use GFP_ATOMIC */
1583	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
1584	update_mask_by_l2(cpu, &mask);
1585
1586	if (has_coregroup_support())
1587		update_coregroup_mask(cpu, &mask);
1588
1589	if (chip_id_lookup_table && ret)
1590		chip_id = cpu_to_chip_id(cpu);
1591
1592	if (shared_caches)
1593		submask_fn = cpu_l2_cache_mask;
1594
1595	/* Update core_mask with all the CPUs that are part of submask */
1596	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
1597
1598	/* Skip all CPUs already part of current CPU core mask */
1599	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
1600
1601	/* If chip_id is -1; limit the cpu_core_mask to within PKG */
1602	if (chip_id == -1)
1603		cpumask_and(mask, mask, cpu_cpu_mask(cpu));
1604
1605	for_each_cpu(i, mask) {
1606		if (chip_id == cpu_to_chip_id(i)) {
1607			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
1608			cpumask_andnot(mask, mask, submask_fn(i));
1609		} else {
1610			cpumask_andnot(mask, mask, cpu_core_mask(i));
1611		}
1612	}
1613
1614	free_cpumask_var(mask);
1615}
1616
1617/* Activate a secondary processor. */
1618__no_stack_protector
1619void start_secondary(void *unused)
1620{
1621	unsigned int cpu = raw_smp_processor_id();
1622
1623	/* PPC64 calls setup_kup() in early_setup_secondary() */
1624	if (IS_ENABLED(CONFIG_PPC32))
1625		setup_kup();
1626
1627	mmgrab_lazy_tlb(&init_mm);
1628	current->active_mm = &init_mm;
1629	VM_WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(&init_mm)));
1630	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
1631	inc_mm_active_cpus(&init_mm);
1632
1633	smp_store_cpu_info(cpu);
1634	set_dec(tb_ticks_per_jiffy);
1635	rcutree_report_cpu_starting(cpu);
1636	cpu_callin_map[cpu] = 1;
1637
1638	if (smp_ops->setup_cpu)
1639		smp_ops->setup_cpu(cpu);
1640	if (smp_ops->take_timebase)
1641		smp_ops->take_timebase();
1642
1643	secondary_cpu_time_init();
1644
1645#ifdef CONFIG_PPC64
1646	if (system_state == SYSTEM_RUNNING)
1647		vdso_data->processorCount++;
1648
1649	vdso_getcpu_init();
1650#endif
1651	set_numa_node(numa_cpu_lookup_table[cpu]);
1652	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1653
1654	/* Update topology CPU masks */
1655	add_cpu_to_masks(cpu);
1656
1657	/*
1658	 * Check for any shared caches. Note that this must be done on a
1659	 * per-core basis because one core in the pair might be disabled.
1660	 */
1661	if (!shared_caches) {
1662		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1663		struct cpumask *mask = cpu_l2_cache_mask(cpu);
1664
1665		if (has_big_cores)
1666			sibling_mask = cpu_smallcore_mask;
1667
1668		if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
1669			shared_caches = true;
1670	}
1671
1672	smp_wmb();
1673	notify_cpu_starting(cpu);
1674	set_cpu_online(cpu, true);
1675
1676	boot_init_stack_canary();
1677
1678	local_irq_enable();
1679
1680	/* We can enable ftrace for secondary cpus now */
1681	this_cpu_enable_ftrace();
1682
1683	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1684
1685	BUG();
1686}
1687
1688static struct sched_domain_topology_level powerpc_topology[6];
1689
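/*
 * The levels filled in below, smallest to largest span, are SMT (small-core
 * or regular), CACHE (when the L2 is shared beyond the SMT siblings), MC
 * (when coregroups are reported) and PKG; the remaining zero-initialised slot
 * serves as the required trailing terminator.
 */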
1690static void __init build_sched_topology(void)
1691{
1692	int i = 0;
1693
1694	if (is_shared_processor() && has_big_cores)
1695		static_branch_enable(&splpar_asym_pack);
1696
1697#ifdef CONFIG_SCHED_SMT
1698	if (has_big_cores) {
1699		pr_info("Big cores detected but using small core scheduling\n");
1700		powerpc_topology[i++] = (struct sched_domain_topology_level){
1701			smallcore_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
1702		};
1703	} else {
1704		powerpc_topology[i++] = (struct sched_domain_topology_level){
1705			cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
1706		};
1707	}
1708#endif
1709	if (shared_caches) {
1710		powerpc_topology[i++] = (struct sched_domain_topology_level){
1711			shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE)
1712		};
1713	}
1714	if (has_coregroup_support()) {
1715		powerpc_topology[i++] = (struct sched_domain_topology_level){
1716			cpu_mc_mask, powerpc_shared_proc_flags, SD_INIT_NAME(MC)
1717		};
1718	}
1719	powerpc_topology[i++] = (struct sched_domain_topology_level){
1720		cpu_cpu_mask, powerpc_shared_proc_flags, SD_INIT_NAME(PKG)
1721	};
1722
1723	/* There must be one trailing NULL entry left.  */
1724	BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1);
1725
1726	set_sched_topology(powerpc_topology);
1727}
1728
1729void __init smp_cpus_done(unsigned int max_cpus)
1730{
1731	/*
1732	 * We are running pinned to the boot CPU, see rest_init().
1733	 */
1734	if (smp_ops && smp_ops->setup_cpu)
1735		smp_ops->setup_cpu(boot_cpuid);
1736
1737	if (smp_ops && smp_ops->bringup_done)
1738		smp_ops->bringup_done();
1739
1740	dump_numa_cpu_topology();
1741	build_sched_topology();
1742}
1743
1744/*
1745 * For asym packing, by default a lower numbered CPU has higher priority.
1746 * On shared processors, pack to the lower numbered core. However, avoid moving
1747 * between thread_groups within the same core.
1748 */
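/*
 * Worked example, assuming threads_per_core == 8: with splpar_asym_pack
 * enabled CPUs 0-7 all return 0 and CPUs 8-15 all return -1, so every thread
 * of a core carries the same priority and lower numbered cores are preferred;
 * otherwise priorities simply decrease with the CPU number.
 */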
1749int arch_asym_cpu_priority(int cpu)
1750{
1751	if (static_branch_unlikely(&splpar_asym_pack))
1752		return -cpu / threads_per_core;
1753
1754	return -cpu;
1755}
1756
1757#ifdef CONFIG_HOTPLUG_CPU
1758int __cpu_disable(void)
1759{
1760	int cpu = smp_processor_id();
1761	int err;
1762
1763	if (!smp_ops->cpu_disable)
1764		return -ENOSYS;
1765
1766	this_cpu_disable_ftrace();
1767
1768	err = smp_ops->cpu_disable();
1769	if (err)
1770		return err;
1771
1772	/* Update sibling maps */
1773	remove_cpu_from_masks(cpu);
1774
1775	return 0;
1776}
1777
1778void __cpu_die(unsigned int cpu)
1779{
1780	/*
1781	 * This could perhaps be a generic call in the idle task dead path, but
1782	 * that requires testing from all archs, so keep it here for now.
1783	 */
1784	VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(&init_mm)));
1785	dec_mm_active_cpus(&init_mm);
1786	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
1787
1788	if (smp_ops->cpu_die)
1789		smp_ops->cpu_die(cpu);
1790}
1791
1792void __noreturn arch_cpu_idle_dead(void)
1793{
1794	/*
1795	 * Disable on the down path. This will be re-enabled by
1796	 * start_secondary() via start_secondary_resume() below
1797	 */
1798	this_cpu_disable_ftrace();
1799
1800	if (smp_ops->cpu_offline_self)
1801		smp_ops->cpu_offline_self();
1802
1803	/* If we return, we re-enter start_secondary */
1804	start_secondary_resume();
1805}
1806
1807#endif
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * SMP support for ppc.
   4 *
   5 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
   6 * deal of code from the sparc and intel versions.
   7 *
   8 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
   9 *
  10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
  11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
  12 */
  13
  14#undef DEBUG
  15
  16#include <linux/kernel.h>
  17#include <linux/export.h>
  18#include <linux/sched/mm.h>
  19#include <linux/sched/task_stack.h>
  20#include <linux/sched/topology.h>
  21#include <linux/smp.h>
  22#include <linux/interrupt.h>
  23#include <linux/delay.h>
  24#include <linux/init.h>
  25#include <linux/spinlock.h>
  26#include <linux/cache.h>
  27#include <linux/err.h>
  28#include <linux/device.h>
  29#include <linux/cpu.h>
  30#include <linux/notifier.h>
  31#include <linux/topology.h>
  32#include <linux/profile.h>
  33#include <linux/processor.h>
  34#include <linux/random.h>
  35#include <linux/stackprotector.h>
  36#include <linux/pgtable.h>
  37#include <linux/clockchips.h>
  38#include <linux/kexec.h>
  39
  40#include <asm/ptrace.h>
  41#include <linux/atomic.h>
  42#include <asm/irq.h>
  43#include <asm/hw_irq.h>
  44#include <asm/kvm_ppc.h>
  45#include <asm/dbell.h>
  46#include <asm/page.h>
  47#include <asm/smp.h>
  48#include <asm/time.h>
  49#include <asm/machdep.h>
  50#include <asm/mmu_context.h>
  51#include <asm/cputhreads.h>
  52#include <asm/cputable.h>
  53#include <asm/mpic.h>
  54#include <asm/vdso_datapage.h>
  55#ifdef CONFIG_PPC64
  56#include <asm/paca.h>
  57#endif
  58#include <asm/vdso.h>
  59#include <asm/debug.h>
  60#include <asm/cpu_has_feature.h>
  61#include <asm/ftrace.h>
  62#include <asm/kup.h>
  63#include <asm/fadump.h>
  64#include <asm/systemcfg.h>
  65
  66#include <trace/events/ipi.h>
  67
  68#ifdef DEBUG
  69#include <asm/udbg.h>
  70#define DBG(fmt...) udbg_printf(fmt)
  71#else
  72#define DBG(fmt...)
  73#endif
  74
  75#ifdef CONFIG_HOTPLUG_CPU
  76/* State of each CPU during hotplug phases */
  77static DEFINE_PER_CPU(int, cpu_state) = { 0 };
  78#endif
  79
  80struct task_struct *secondary_current;
  81bool has_big_cores __ro_after_init;
  82bool coregroup_enabled __ro_after_init;
  83bool thread_group_shares_l2 __ro_after_init;
  84bool thread_group_shares_l3 __ro_after_init;
  85
  86DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
  87DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
  88DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
  89DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
  90static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
  91
  92EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  93EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
  94EXPORT_PER_CPU_SYMBOL(cpu_core_map);
  95EXPORT_SYMBOL_GPL(has_big_cores);
  96
  97#define MAX_THREAD_LIST_SIZE	8
  98#define THREAD_GROUP_SHARE_L1   1
  99#define THREAD_GROUP_SHARE_L2_L3 2
 100struct thread_groups {
 101	unsigned int property;
 102	unsigned int nr_groups;
 103	unsigned int threads_per_group;
 104	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
 105};
 106
 107/* Maximum number of properties that groups of threads within a core can share */
 108#define MAX_THREAD_GROUP_PROPERTIES 2
 109
 110struct thread_groups_list {
 111	unsigned int nr_properties;
 112	struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
 113};
 114
 115static struct thread_groups_list tgl[NR_CPUS] __initdata;
 116/*
 117 * On big-cores system, thread_group_l1_cache_map for each CPU corresponds to
 118 * the set its siblings that share the L1-cache.
 119 */
 120DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
 121
 122/*
 123 * On some big-cores system, thread_group_l2_cache_map for each CPU
 124 * corresponds to the set its siblings within the core that share the
 125 * L2-cache.
 126 */
 127DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
 128
 129/*
 130 * On P10, thread_group_l3_cache_map for each CPU is equal to the
 131 * thread_group_l2_cache_map
 132 */
 133DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
 134
 135/* SMP operations for this machine */
 136struct smp_ops_t *smp_ops;
 137
 138/* Can't be static due to PowerMac hackery */
 139volatile unsigned int cpu_callin_map[NR_CPUS];
 140
 141int smt_enabled_at_boot = 1;
 142
 143/*
 144 * Returns 1 if the specified cpu should be brought up during boot.
 145 * Used to inhibit booting threads if they've been disabled or
  146 * limited on the command line.
 147 */
 148int smp_generic_cpu_bootable(unsigned int nr)
 149{
 150	/* Special case - we inhibit secondary thread startup
 151	 * during boot if the user requests it.
 152	 */
 153	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
 154		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
 155			return 0;
 156		if (smt_enabled_at_boot
 157		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
 158			return 0;
 159	}
 160
 161	return 1;
 162}
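/*
 * Illustrative example (not from the original source): if SMT is limited to
 * two threads per core at boot (so smt_enabled_at_boot == 2, e.g. via the
 * ppc64 "smt-enabled=2" command line option), then during boot on an
 * SMT-capable CPU smp_generic_cpu_bootable() returns 1 only for threads 0
 * and 1 of each core and 0 for the remaining threads; once the system is
 * running it always returns 1.
 */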
 163
 164
 165#ifdef CONFIG_PPC64
 166int smp_generic_kick_cpu(int nr)
 167{
 168	if (nr < 0 || nr >= nr_cpu_ids)
 169		return -EINVAL;
 170
 171	/*
 172	 * The processor is currently spinning, waiting for the
  173	 * cpu_start field to become non-zero. After we set cpu_start,
  174	 * the processor will continue on to secondary_start.
 175	 */
 176	if (!paca_ptrs[nr]->cpu_start) {
 177		paca_ptrs[nr]->cpu_start = 1;
 178		smp_mb();
 179		return 0;
 180	}
 181
 182#ifdef CONFIG_HOTPLUG_CPU
 183	/*
  184	 * OK, it's not there, so it might be soft-unplugged; let's
  185	 * try to bring it back.
 186	 */
 187	generic_set_cpu_up(nr);
 188	smp_wmb();
 189	smp_send_reschedule(nr);
 190#endif /* CONFIG_HOTPLUG_CPU */
 191
 192	return 0;
 193}
 194#endif /* CONFIG_PPC64 */
 195
 196static irqreturn_t call_function_action(int irq, void *data)
 197{
 198	generic_smp_call_function_interrupt();
 199	return IRQ_HANDLED;
 200}
 201
 202static irqreturn_t reschedule_action(int irq, void *data)
 203{
 204	scheduler_ipi();
 205	return IRQ_HANDLED;
 206}
 207
 208#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 209static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
 210{
 211	timer_broadcast_interrupt();
 212	return IRQ_HANDLED;
 213}
 214#endif
 215
 216#ifdef CONFIG_NMI_IPI
 217static irqreturn_t nmi_ipi_action(int irq, void *data)
 218{
 219	smp_handle_nmi_ipi(get_irq_regs());
 220	return IRQ_HANDLED;
 221}
 222#endif
 223
 224static irq_handler_t smp_ipi_action[] = {
 225	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
 226	[PPC_MSG_RESCHEDULE] = reschedule_action,
 227#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 228	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
 229#endif
 230#ifdef CONFIG_NMI_IPI
 231	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
 232#endif
 233};
 234
 235/*
 236 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 237 * than going through the call function infrastructure, and strongly
 238 * serialized, so it is more appropriate for debugging.
 239 */
 240const char *smp_ipi_name[] = {
 241	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
 242	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
 243#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 244	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
 245#endif
 246#ifdef CONFIG_NMI_IPI
 247	[PPC_MSG_NMI_IPI] = "nmi ipi",
 248#endif
 249};
 250
 251/* optional function to request ipi, for controllers with >= 4 ipis */
 252int smp_request_message_ipi(int virq, int msg)
 253{
 254	int err;
 255
 256	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
 257		return -EINVAL;
 258#ifndef CONFIG_NMI_IPI
 259	if (msg == PPC_MSG_NMI_IPI)
 260		return 1;
 261#endif
 262
 263	err = request_irq(virq, smp_ipi_action[msg],
 264			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
 265			  smp_ipi_name[msg], NULL);
 266	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
 267		virq, smp_ipi_name[msg], err);
 268
 269	return err;
 270}
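/*
 * Illustrative sketch (an assumption, not from the original source): a
 * platform interrupt controller driver with one hardware IPI per message
 * could register them all from its probe path roughly as follows, where
 * ipi_virq[] is a hypothetical per-message array of Linux virtual irq
 * numbers obtained from the controller:
 *
 *	for (msg = PPC_MSG_CALL_FUNCTION; msg <= PPC_MSG_NMI_IPI; msg++)
 *		smp_request_message_ipi(ipi_virq[msg], msg);
 *
 * Controllers with a single muxed IPI instead select
 * CONFIG_PPC_SMP_MUXED_IPI and provide smp_ops->cause_ipi (see below).
 */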
 271
 272#ifdef CONFIG_PPC_SMP_MUXED_IPI
 273struct cpu_messages {
 274	long messages;			/* current messages */
 275};
 276static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
 277
 278void smp_muxed_ipi_set_message(int cpu, int msg)
 279{
 280	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 281	char *message = (char *)&info->messages;
 282
 283	/*
 284	 * Order previous accesses before accesses in the IPI handler.
 285	 */
 286	smp_mb();
 287	WRITE_ONCE(message[msg], 1);
 288}
 289
 290void smp_muxed_ipi_message_pass(int cpu, int msg)
 291{
 292	smp_muxed_ipi_set_message(cpu, msg);
 293
 294	/*
 295	 * cause_ipi functions are required to include a full barrier
 296	 * before doing whatever causes the IPI.
 297	 */
 298	smp_ops->cause_ipi(cpu);
 299}
 300
 301#ifdef __BIG_ENDIAN__
 302#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
 303#else
 304#define IPI_MESSAGE(A) (1uL << (8 * (A)))
 305#endif
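/*
 * Worked example (illustrative): smp_muxed_ipi_set_message() above stores a
 * 1 into byte 'msg' of the per-cpu messages word, and IPI_MESSAGE(msg) is the
 * matching bit in that word. Assuming the usual asm/smp.h numbering where
 * PPC_MSG_RESCHEDULE is 1, on a 64-bit little-endian kernel a pending
 * reschedule shows up as bit 8 (messages & 0x100), while on a 64-bit
 * big-endian kernel byte 1 covers bits 48..55, so the same message shows up
 * as bit 48 (messages & 0x0001000000000000). The demux loop below tests
 * exactly these IPI_MESSAGE() bits.
 */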
 306
 307irqreturn_t smp_ipi_demux(void)
 308{
 309	mb();	/* order any irq clear */
 310
 311	return smp_ipi_demux_relaxed();
 312}
 313
 314/* sync-free variant. Callers should ensure synchronization */
 315irqreturn_t smp_ipi_demux_relaxed(void)
 316{
 317	struct cpu_messages *info;
 318	unsigned long all;
 319
 320	info = this_cpu_ptr(&ipi_message);
 321	do {
 322		all = xchg(&info->messages, 0);
 323#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
 324		/*
 325		 * Must check for PPC_MSG_RM_HOST_ACTION messages
 326		 * before PPC_MSG_CALL_FUNCTION messages because when
 327		 * a VM is destroyed, we call kick_all_cpus_sync()
 328		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
 329		 * messages have completed before we free any VCPUs.
 330		 */
 331		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
 332			kvmppc_xics_ipi_action();
 333#endif
 334		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
 335			generic_smp_call_function_interrupt();
 336		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
 337			scheduler_ipi();
 338#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 339		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
 340			timer_broadcast_interrupt();
 341#endif
 342#ifdef CONFIG_NMI_IPI
 343		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
 344			nmi_ipi_action(0, NULL);
 345#endif
 346	} while (READ_ONCE(info->messages));
 347
 348	return IRQ_HANDLED;
 349}
 350#endif /* CONFIG_PPC_SMP_MUXED_IPI */
 351
 352static inline void do_message_pass(int cpu, int msg)
 353{
 354	if (smp_ops->message_pass)
 355		smp_ops->message_pass(cpu, msg);
 356#ifdef CONFIG_PPC_SMP_MUXED_IPI
 357	else
 358		smp_muxed_ipi_message_pass(cpu, msg);
 359#endif
 360}
 361
 362void arch_smp_send_reschedule(int cpu)
 363{
 364	if (likely(smp_ops))
 365		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 366}
 367EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
 368
 369void arch_send_call_function_single_ipi(int cpu)
 370{
 371	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 372}
 373
 374void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 375{
 376	unsigned int cpu;
 377
 378	for_each_cpu(cpu, mask)
 379		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 380}
 381
 382#ifdef CONFIG_NMI_IPI
 383
 384/*
 385 * "NMI IPI" system.
 386 *
 387 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 388 * a running system. They can be used for crash, debug, halt/reboot, etc.
 389 *
 390 * The IPI call waits with interrupts disabled until all targets enter the
 391 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 392 * have returned from their handlers, so there is no guarantee about
 393 * concurrency or re-entrancy.
 394 *
 395 * A new NMI can be issued before all targets exit the handler.
 396 *
 397 * The IPI call may time out without all targets entering the NMI handler.
 398 * In that case, there is some logic to recover (and ignore subsequent
 399 * NMI interrupts that may eventually be raised), but the platform interrupt
 400 * handler may not be able to distinguish this from other exception causes,
 401 * which may cause a crash.
 402 */
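/*
 * For example, smp_send_debugger_break() and crash_send_ipi() further down in
 * this file both use smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, fn, 1000000), i.e.
 * they give the other CPUs up to one second to enter the NMI handler.
 */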
 403
 404static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
 405static struct cpumask nmi_ipi_pending_mask;
 406static bool nmi_ipi_busy = false;
 407static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
 408
 409noinstr static void nmi_ipi_lock_start(unsigned long *flags)
 410{
 411	raw_local_irq_save(*flags);
 412	hard_irq_disable();
 413	while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 414		raw_local_irq_restore(*flags);
 415		spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
 416		raw_local_irq_save(*flags);
 417		hard_irq_disable();
 418	}
 419}
 420
 421noinstr static void nmi_ipi_lock(void)
 422{
 423	while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
 424		spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
 425}
 426
 427noinstr static void nmi_ipi_unlock(void)
 428{
 429	smp_mb();
 430	WARN_ON(raw_atomic_read(&__nmi_ipi_lock) != 1);
 431	raw_atomic_set(&__nmi_ipi_lock, 0);
 432}
 433
 434noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
 435{
 436	nmi_ipi_unlock();
 437	raw_local_irq_restore(*flags);
 438}
 439
 440/*
 441 * Platform NMI handler calls this to ack
 442 */
 443noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
 444{
 445	void (*fn)(struct pt_regs *) = NULL;
 446	unsigned long flags;
 447	int me = raw_smp_processor_id();
 448	int ret = 0;
 449
 450	/*
 451	 * Unexpected NMIs are possible here because the interrupt may not
 452	 * be able to distinguish NMI IPIs from other types of NMIs, or
 453	 * because the caller may have timed out.
 454	 */
 455	nmi_ipi_lock_start(&flags);
 456	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
 457		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 458		fn = READ_ONCE(nmi_ipi_function);
 459		WARN_ON_ONCE(!fn);
 460		ret = 1;
 461	}
 462	nmi_ipi_unlock_end(&flags);
 463
 464	if (fn)
 465		fn(regs);
 466
 467	return ret;
 468}
 469
 470static void do_smp_send_nmi_ipi(int cpu, bool safe)
 471{
 472	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
 473		return;
 474
 475	if (cpu >= 0) {
 476		do_message_pass(cpu, PPC_MSG_NMI_IPI);
 477	} else {
 478		int c;
 479
 480		for_each_online_cpu(c) {
 481			if (c == raw_smp_processor_id())
 482				continue;
 483			do_message_pass(c, PPC_MSG_NMI_IPI);
 484		}
 485	}
 486}
 487
 488/*
 489 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 490 * - fn is the target callback function.
 491 * - delay_us > 0 is the delay before giving up waiting for targets to
 492 *   begin executing the handler, == 0 specifies indefinite delay.
 493 */
 494static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
 495				u64 delay_us, bool safe)
 496{
 497	unsigned long flags;
 498	int me = raw_smp_processor_id();
 499	int ret = 1;
 500
 501	BUG_ON(cpu == me);
 502	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
 503
 504	if (unlikely(!smp_ops))
 505		return 0;
 506
 507	nmi_ipi_lock_start(&flags);
 508	while (nmi_ipi_busy) {
 509		nmi_ipi_unlock_end(&flags);
 510		spin_until_cond(!nmi_ipi_busy);
 511		nmi_ipi_lock_start(&flags);
 512	}
 513	nmi_ipi_busy = true;
 514	nmi_ipi_function = fn;
 515
 516	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
 517
 518	if (cpu < 0) {
 519		/* ALL_OTHERS */
 520		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
 521		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 522	} else {
 523		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
 524	}
 525
 526	nmi_ipi_unlock();
 527
 528	/* Interrupts remain hard disabled */
 529
 530	do_smp_send_nmi_ipi(cpu, safe);
 531
 532	nmi_ipi_lock();
 533	/* nmi_ipi_busy is set here, so unlock/lock is okay */
 534	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
 535		nmi_ipi_unlock();
 536		udelay(1);
 537		nmi_ipi_lock();
 538		if (delay_us) {
 539			delay_us--;
 540			if (!delay_us)
 541				break;
 542		}
 543	}
 544
 545	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
 546		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
 547		ret = 0;
 548		cpumask_clear(&nmi_ipi_pending_mask);
 549	}
 550
 551	nmi_ipi_function = NULL;
 552	nmi_ipi_busy = false;
 553
 554	nmi_ipi_unlock_end(&flags);
 555
 556	return ret;
 557}
 558
 559int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 560{
 561	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
 562}
 563
 564int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 565{
 566	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
 567}
 568#endif /* CONFIG_NMI_IPI */
 569
 570#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 571void tick_broadcast(const struct cpumask *mask)
 572{
 573	unsigned int cpu;
 574
 575	for_each_cpu(cpu, mask)
 576		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
 577}
 578#endif
 579
 580#ifdef CONFIG_DEBUGGER
 581static void debugger_ipi_callback(struct pt_regs *regs)
 582{
 583	debugger_ipi(regs);
 584}
 585
 586void smp_send_debugger_break(void)
 587{
 588	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
 589}
 590#endif
 591
 592#ifdef CONFIG_CRASH_DUMP
 593void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 594{
 595	int cpu;
 596
 597	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
 598	if (kdump_in_progress() && crash_wake_offline) {
 599		for_each_present_cpu(cpu) {
 600			if (cpu_online(cpu))
 601				continue;
 602			/*
 603			 * crash_ipi_callback will wait for
 604			 * all cpus, including offline CPUs.
 605			 * We don't care about nmi_ipi_function.
 606			 * Offline cpus will jump straight into
  607			 * crash_ipi_callback, so we can skip the
 608			 * entire NMI dance and waiting for
 609			 * cpus to clear pending mask, etc.
 610			 */
 611			do_smp_send_nmi_ipi(cpu, false);
 612		}
 613	}
 614}
 615#endif
 616
 617void crash_smp_send_stop(void)
 618{
 619	static bool stopped = false;
 620
 621	/*
 622	 * In case of fadump, register data for all CPUs is captured by f/w
 623	 * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before
 624	 * this rtas call to avoid tricky post processing of those CPUs'
 625	 * backtraces.
 626	 */
 627	if (should_fadump_crash())
 628		return;
 629
 630	if (stopped)
 631		return;
 632
 633	stopped = true;
 634
 635#ifdef CONFIG_CRASH_DUMP
 636	if (kexec_crash_image) {
 637		crash_kexec_prepare();
 638		return;
 639	}
 640#endif
 641
 642	smp_send_stop();
 643}
 644
 645#ifdef CONFIG_NMI_IPI
 646static void nmi_stop_this_cpu(struct pt_regs *regs)
 647{
 648	/*
  649	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
 650	 */
 651	set_cpu_online(smp_processor_id(), false);
 652
 653	spin_begin();
 654	while (1)
 655		spin_cpu_relax();
 656}
 657
 658void smp_send_stop(void)
 659{
 660	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
 661}
 662
 663#else /* CONFIG_NMI_IPI */
 664
 665static void stop_this_cpu(void *dummy)
 666{
 667	hard_irq_disable();
 668
 669	/*
  670	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings
 671	 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
 672	 * to know other CPUs are offline before it breaks locks to flush
 673	 * printk buffers, in case we panic()ed while holding the lock.
 674	 */
 675	set_cpu_online(smp_processor_id(), false);
 676
 677	spin_begin();
 678	while (1)
 679		spin_cpu_relax();
 680}
 681
 682void smp_send_stop(void)
 683{
 684	static bool stopped = false;
 685
 686	/*
 687	 * Prevent waiting on csd lock from a previous smp_send_stop.
 688	 * This is racy, but in general callers try to do the right
 689	 * thing and only fire off one smp_send_stop (e.g., see
  690	 * kernel/panic.c).
 691	 */
 692	if (stopped)
 693		return;
 694
 695	stopped = true;
 696
 697	smp_call_function(stop_this_cpu, NULL, 0);
 698}
 699#endif /* CONFIG_NMI_IPI */
 700
 701static struct task_struct *current_set[NR_CPUS];
 702
 703static void smp_store_cpu_info(int id)
 704{
 705	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 706#ifdef CONFIG_PPC_E500
 707	per_cpu(next_tlbcam_idx, id)
 708		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
 709#endif
 710}
 711
 712/*
  713 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
  714 * rather than just passing around the cpumask we pass around a function that
  715 * returns that cpumask for the given CPU.
 716 */
 717static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
 718{
 719	cpumask_set_cpu(i, get_cpumask(j));
 720	cpumask_set_cpu(j, get_cpumask(i));
 721}
 722
 723#ifdef CONFIG_HOTPLUG_CPU
 724static void set_cpus_unrelated(int i, int j,
 725		struct cpumask *(*get_cpumask)(int))
 726{
 727	cpumask_clear_cpu(i, get_cpumask(j));
 728	cpumask_clear_cpu(j, get_cpumask(i));
 729}
 730#endif
 731
 732/*
  733 * Extends set_cpus_related. Instead of setting one CPU at a time in
  734 * dstmask, OR in srcmask in one shot. dstmask should be a superset of srcmask.
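 *
 * For example (illustrative), or_cpumasks_related(4, 12, cpu_sibling_mask,
 * cpu_core_mask) ORs the sibling mask of CPU 12 into the core mask of every
 * sibling of CPU 4, and the sibling mask of CPU 4 into the core mask of
 * every sibling of CPU 12.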
 735 */
 736static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
 737				struct cpumask *(*dstmask)(int))
 738{
 739	struct cpumask *mask;
 740	int k;
 741
 742	mask = srcmask(j);
 743	for_each_cpu(k, srcmask(i))
 744		cpumask_or(dstmask(k), dstmask(k), mask);
 745
 746	if (i == j)
 747		return;
 748
 749	mask = srcmask(i);
 750	for_each_cpu(k, srcmask(j))
 751		cpumask_or(dstmask(k), dstmask(k), mask);
 752}
 753
 754/*
 755 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 756 *                      property for the CPU device node @dn and stores
 757 *                      the parsed output in the thread_groups_list
 758 *                      structure @tglp.
 759 *
 760 * @dn: The device node of the CPU device.
 761 * @tglp: Pointer to a thread group list structure into which the parsed
 762 *      output of "ibm,thread-groups" is stored.
 763 *
 764 * ibm,thread-groups[0..N-1] array defines which group of threads in
 765 * the CPU-device node can be grouped together based on the property.
 766 *
 767 * This array can represent thread groupings for multiple properties.
 768 *
 769 * ibm,thread-groups[i + 0] tells us the property based on which the
 770 * threads are being grouped together. If this value is 1, it implies
 771 * that the threads in the same group share L1, translation cache. If
 772 * the value is 2, it implies that the threads in the same group share
 773 * the same L2 cache.
 774 *
 775 * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
  776 * property ibm,thread-groups[i].
 777 *
 778 * ibm,thread-groups[i+2] tells us the number of threads in each such
 779 * group.
 780 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
 781 *
  782 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
 783 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 784 * the grouping.
 785 *
 786 * Example:
 787 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
  788 * This can be decomposed into two consecutive arrays:
 789 * a) [1,2,4,8,10,12,14,9,11,13,15]
 790 * b) [2,2,4,8,10,12,14,9,11,13,15]
 791 *
 792 * where in,
 793 *
 794 * a) provides information of Property "1" being shared by "2" groups,
 795 *  each with "4" threads each. The "ibm,ppc-interrupt-server#s" of
 796 *  the first group is {8,10,12,14} and the
 797 *  "ibm,ppc-interrupt-server#s" of the second group is
  798 *  {9,11,13,15}. Property "1" is indicative of the threads in the
 799 *  group sharing L1 cache, translation cache and Instruction Data
 800 *  flow.
 801 *
 802 * b) provides information of Property "2" being shared by "2" groups,
 803 *  each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
 804 *  the first group is {8,10,12,14} and the
 805 *  "ibm,ppc-interrupt-server#s" of the second group is
 806 *  {9,11,13,15}. Property "2" indicates that the threads in each
 807 *  group share the L2-cache.
 808 *
 809 * Returns 0 on success, -EINVAL if the property does not exist,
 810 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 811 * property data isn't large enough.
 812 */
 813static int parse_thread_groups(struct device_node *dn,
 814			       struct thread_groups_list *tglp)
 815{
 816	unsigned int property_idx = 0;
 817	u32 *thread_group_array;
 818	size_t total_threads;
 819	int ret = 0, count;
 820	u32 *thread_list;
 821	int i = 0;
 822
 823	count = of_property_count_u32_elems(dn, "ibm,thread-groups");
 824	thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
 825	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
 826					 thread_group_array, count);
 827	if (ret)
 828		goto out_free;
 829
 830	while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
 831		int j;
 832		struct thread_groups *tg = &tglp->property_tgs[property_idx++];
 833
 834		tg->property = thread_group_array[i];
 835		tg->nr_groups = thread_group_array[i + 1];
 836		tg->threads_per_group = thread_group_array[i + 2];
 837		total_threads = tg->nr_groups * tg->threads_per_group;
 838
 839		thread_list = &thread_group_array[i + 3];
 840
 841		for (j = 0; j < total_threads; j++)
 842			tg->thread_list[j] = thread_list[j];
 843		i = i + 3 + total_threads;
 844	}
 845
 846	tglp->nr_properties = property_idx;
 847
 848out_free:
 849	kfree(thread_group_array);
 850	return ret;
 851}
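/*
 * Worked example (illustrative): for the "ibm,thread-groups" value shown in
 * the comment above, parse_thread_groups() sets tglp->nr_properties = 2 and
 * fills property_tgs[0] with {property = 1, nr_groups = 2,
 * threads_per_group = 4, thread_list = {8,10,12,14,9,11,13,15}}, and
 * property_tgs[1] with the same groups but property = 2.
 */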
 852
 853/*
 854 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 855 *                              that @cpu belongs to.
 856 *
 857 * @cpu : The logical CPU whose thread group is being searched.
 858 * @tg : The thread-group structure of the CPU node which @cpu belongs
 859 *       to.
 860 *
 861 * Returns the index to tg->thread_list that points to the start
 862 * of the thread_group that @cpu belongs to.
 863 *
 864 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 865 * tg->thread_list.
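 *
 * For example, using the "ibm,thread-groups" layout described earlier with
 * tg->thread_list = {8,10,12,14,9,11,13,15}, tg->nr_groups = 2 and
 * tg->threads_per_group = 4, a CPU whose hardware id is 11 is found at
 * index 5, so the function returns 4, the start of the second group.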
 866 */
 867static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
 868{
 869	int hw_cpu_id = get_hard_smp_processor_id(cpu);
 870	int i, j;
 871
 872	for (i = 0; i < tg->nr_groups; i++) {
 873		int group_start = i * tg->threads_per_group;
 874
 875		for (j = 0; j < tg->threads_per_group; j++) {
 876			int idx = group_start + j;
 877
 878			if (tg->thread_list[idx] == hw_cpu_id)
 879				return group_start;
 880		}
 881	}
 882
 883	return -1;
 884}
 885
 886static struct thread_groups *__init get_thread_groups(int cpu,
 887						      int group_property,
 888						      int *err)
 889{
 890	struct device_node *dn = of_get_cpu_node(cpu, NULL);
 891	struct thread_groups_list *cpu_tgl = &tgl[cpu];
 892	struct thread_groups *tg = NULL;
 893	int i;
 894	*err = 0;
 895
 896	if (!dn) {
 897		*err = -ENODATA;
 898		return NULL;
 899	}
 900
 901	if (!cpu_tgl->nr_properties) {
 902		*err = parse_thread_groups(dn, cpu_tgl);
 903		if (*err)
 904			goto out;
 905	}
 906
 907	for (i = 0; i < cpu_tgl->nr_properties; i++) {
 908		if (cpu_tgl->property_tgs[i].property == group_property) {
 909			tg = &cpu_tgl->property_tgs[i];
 910			break;
 911		}
 912	}
 913
 914	if (!tg)
 915		*err = -EINVAL;
 916out:
 917	of_node_put(dn);
 918	return tg;
 919}
 920
 921static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg,
 922					       int cpu, int cpu_group_start)
 923{
 924	int first_thread = cpu_first_thread_sibling(cpu);
 925	int i;
 926
 927	zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
 928
 929	for (i = first_thread; i < first_thread + threads_per_core; i++) {
 930		int i_group_start = get_cpu_thread_group_start(i, tg);
 931
 932		if (unlikely(i_group_start == -1)) {
 933			WARN_ON_ONCE(1);
 934			return -ENODATA;
 935		}
 936
 937		if (i_group_start == cpu_group_start)
 938			cpumask_set_cpu(i, *mask);
 939	}
 940
 941	return 0;
 942}
 943
  944static int __init init_thread_group_cache_map(int cpu, int cache_property)
 946{
 947	int cpu_group_start = -1, err = 0;
 948	struct thread_groups *tg = NULL;
 949	cpumask_var_t *mask = NULL;
 950
 951	if (cache_property != THREAD_GROUP_SHARE_L1 &&
 952	    cache_property != THREAD_GROUP_SHARE_L2_L3)
 953		return -EINVAL;
 954
 955	tg = get_thread_groups(cpu, cache_property, &err);
 956
 957	if (!tg)
 958		return err;
 959
 960	cpu_group_start = get_cpu_thread_group_start(cpu, tg);
 961
 962	if (unlikely(cpu_group_start == -1)) {
 963		WARN_ON_ONCE(1);
 964		return -ENODATA;
 965	}
 966
 967	if (cache_property == THREAD_GROUP_SHARE_L1) {
 968		mask = &per_cpu(thread_group_l1_cache_map, cpu);
 969		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
  970	} else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
 972		mask = &per_cpu(thread_group_l2_cache_map, cpu);
 973		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
 974		mask = &per_cpu(thread_group_l3_cache_map, cpu);
 975		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
 976	}
  977
 979	return 0;
 980}
 981
 982static bool shared_caches __ro_after_init;
 983
 984#ifdef CONFIG_SCHED_SMT
  985/* Scheduler domain flags for the SMT level */
 986static int powerpc_smt_flags(void)
 987{
 988	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
 989
 990	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
 991		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
 992		flags |= SD_ASYM_PACKING;
 993	}
 994	return flags;
 995}
 996#endif
 997
 998/*
 999 * On shared processor LPARs scheduled on a big core (which has two or more
1000 * independent thread groups per core), prefer lower numbered CPUs, so
 1001 * that the workload consolidates onto a smaller number of cores.
1002 */
1003static __ro_after_init DEFINE_STATIC_KEY_FALSE(splpar_asym_pack);
1004
1005/*
1006 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1007 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1008 * since the migrated task remains cache hot. We want to take advantage of this
1009 * at the scheduler level so an extra topology level is required.
1010 */
1011static int powerpc_shared_cache_flags(void)
1012{
1013	if (static_branch_unlikely(&splpar_asym_pack))
1014		return SD_SHARE_LLC | SD_ASYM_PACKING;
1015
1016	return SD_SHARE_LLC;
1017}
1018
1019static int powerpc_shared_proc_flags(void)
1020{
1021	if (static_branch_unlikely(&splpar_asym_pack))
1022		return SD_ASYM_PACKING;
1023
1024	return 0;
1025}
1026
1027/*
 1028 * We can't just pass cpu_l2_cache_mask() directly because it
1029 * returns a non-const pointer and the compiler barfs on that.
1030 */
1031static const struct cpumask *shared_cache_mask(int cpu)
1032{
1033	return per_cpu(cpu_l2_cache_map, cpu);
1034}
1035
1036#ifdef CONFIG_SCHED_SMT
1037static const struct cpumask *smallcore_smt_mask(int cpu)
1038{
1039	return cpu_smallcore_mask(cpu);
1040}
1041#endif
1042
1043static struct cpumask *cpu_coregroup_mask(int cpu)
1044{
1045	return per_cpu(cpu_coregroup_map, cpu);
1046}
1047
1048static bool has_coregroup_support(void)
1049{
1050	/* Coregroup identification not available on shared systems */
1051	if (is_shared_processor())
 1052		return false;
1053
1054	return coregroup_enabled;
1055}
1056
1057static const struct cpumask *cpu_mc_mask(int cpu)
1058{
1059	return cpu_coregroup_mask(cpu);
1060}
1061
1062static int __init init_big_cores(void)
1063{
1064	int cpu;
1065
1066	for_each_possible_cpu(cpu) {
1067		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
1068
1069		if (err)
1070			return err;
1071
1072		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
1073					GFP_KERNEL,
1074					cpu_to_node(cpu));
1075	}
1076
1077	has_big_cores = true;
1078
1079	for_each_possible_cpu(cpu) {
1080		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);
1081
1082		if (err)
1083			return err;
1084	}
1085
1086	thread_group_shares_l2 = true;
1087	thread_group_shares_l3 = true;
1088	pr_debug("L2/L3 cache only shared by the threads in the small core\n");
1089
1090	return 0;
1091}
1092
1093void __init smp_prepare_cpus(unsigned int max_cpus)
1094{
1095	unsigned int cpu, num_threads;
1096
1097	DBG("smp_prepare_cpus\n");
1098
1099	/* 
1100	 * setup_cpu may need to be called on the boot cpu. We haven't
 1101	 * spun any cpus up yet, but let's be paranoid.
1102	 */
1103	BUG_ON(boot_cpuid != smp_processor_id());
1104
1105	/* Fixup boot cpu */
1106	smp_store_cpu_info(boot_cpuid);
1107	cpu_callin_map[boot_cpuid] = 1;
1108
1109	for_each_possible_cpu(cpu) {
1110		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
1111					GFP_KERNEL, cpu_to_node(cpu));
1112		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
1113					GFP_KERNEL, cpu_to_node(cpu));
1114		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
1115					GFP_KERNEL, cpu_to_node(cpu));
1116		if (has_coregroup_support())
1117			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
1118						GFP_KERNEL, cpu_to_node(cpu));
1119
1120#ifdef CONFIG_NUMA
1121		/*
1122		 * numa_node_id() works after this.
1123		 */
1124		if (cpu_present(cpu)) {
1125			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
1126			set_cpu_numa_mem(cpu,
1127				local_memory_node(numa_cpu_lookup_table[cpu]));
1128		}
1129#endif
1130	}
1131
1132	/* Init the cpumasks so the boot CPU is related to itself */
1133	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
1134	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
1135	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
1136
1137	if (has_coregroup_support())
1138		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
1139
1140	init_big_cores();
1141	if (has_big_cores) {
1142		cpumask_set_cpu(boot_cpuid,
1143				cpu_smallcore_mask(boot_cpuid));
1144	}
1145
1146	if (cpu_to_chip_id(boot_cpuid) != -1) {
1147		int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
1148
1149		/*
 1150		 * All threads of a core belong to the same core, so
 1151		 * chip_id_lookup_table will have one entry per core.
 1152		 * Assumption: if boot_cpuid doesn't have a chip-id, then
 1153		 * no other CPU will have one either.
1154		 */
1155		chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
1156		if (chip_id_lookup_table)
1157			memset(chip_id_lookup_table, -1, sizeof(int) * idx);
1158	}
1159
1160	if (smp_ops && smp_ops->probe)
1161		smp_ops->probe();
1162
 1163	// Initialise the generic SMT topology support
1164	num_threads = 1;
1165	if (smt_enabled_at_boot)
1166		num_threads = smt_enabled_at_boot;
1167	cpu_smt_set_num_threads(num_threads, threads_per_core);
1168}
1169
1170void __init smp_prepare_boot_cpu(void)
1171{
1172	BUG_ON(smp_processor_id() != boot_cpuid);
1173#ifdef CONFIG_PPC64
1174	paca_ptrs[boot_cpuid]->__current = current;
1175#endif
1176	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
1177	current_set[boot_cpuid] = current;
1178}
1179
1180#ifdef CONFIG_HOTPLUG_CPU
1181
1182int generic_cpu_disable(void)
1183{
1184	unsigned int cpu = smp_processor_id();
1185
1186	if (cpu == boot_cpuid)
1187		return -EBUSY;
1188
1189	set_cpu_online(cpu, false);
1190#ifdef CONFIG_PPC64_PROC_SYSTEMCFG
1191	systemcfg->processorCount--;
1192#endif
1193	/* Update affinity of all IRQs previously aimed at this CPU */
1194	irq_migrate_all_off_this_cpu();
1195
1196	/*
1197	 * Depending on the details of the interrupt controller, it's possible
1198	 * that one of the interrupts we just migrated away from this CPU is
1199	 * actually already pending on this CPU. If we leave it in that state
1200	 * the interrupt will never be EOI'ed, and will never fire again. So
1201	 * temporarily enable interrupts here, to allow any pending interrupt to
1202	 * be received (and EOI'ed), before we take this CPU offline.
1203	 */
1204	local_irq_enable();
1205	mdelay(1);
1206	local_irq_disable();
1207
1208	return 0;
1209}
1210
1211void generic_cpu_die(unsigned int cpu)
1212{
1213	int i;
1214
1215	for (i = 0; i < 100; i++) {
1216		smp_rmb();
1217		if (is_cpu_dead(cpu))
1218			return;
1219		msleep(100);
1220	}
1221	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
1222}
1223
1224void generic_set_cpu_dead(unsigned int cpu)
1225{
1226	per_cpu(cpu_state, cpu) = CPU_DEAD;
1227}
1228
1229/*
1230 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
1231 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
1232 * which makes the delay in generic_cpu_die() not happen.
1233 */
1234void generic_set_cpu_up(unsigned int cpu)
1235{
1236	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
1237}
1238
1239int generic_check_cpu_restart(unsigned int cpu)
1240{
1241	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
1242}
1243
1244int is_cpu_dead(unsigned int cpu)
1245{
1246	return per_cpu(cpu_state, cpu) == CPU_DEAD;
1247}
1248
1249static bool secondaries_inhibited(void)
1250{
1251	return kvm_hv_mode_active();
1252}
1253
1254#else /* HOTPLUG_CPU */
1255
1256#define secondaries_inhibited()		0
1257
1258#endif
1259
1260static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
1261{
1262#ifdef CONFIG_PPC64
1263	paca_ptrs[cpu]->__current = idle;
1264	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
1265				 THREAD_SIZE - STACK_FRAME_MIN_SIZE;
1266#endif
1267	task_thread_info(idle)->cpu = cpu;
1268	secondary_current = current_set[cpu] = idle;
1269}
1270
1271int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1272{
1273	const unsigned long boot_spin_ms = 5 * MSEC_PER_SEC;
1274	const bool booting = system_state < SYSTEM_RUNNING;
1275	const unsigned long hp_spin_ms = 1;
1276	unsigned long deadline;
1277	int rc;
1278	const unsigned long spin_wait_ms = booting ? boot_spin_ms : hp_spin_ms;
1279
1280	/*
1281	 * Don't allow secondary threads to come online if inhibited
1282	 */
1283	if (threads_per_core > 1 && secondaries_inhibited() &&
1284	    cpu_thread_in_subcore(cpu))
1285		return -EBUSY;
1286
1287	if (smp_ops == NULL ||
1288	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1289		return -EINVAL;
1290
1291	cpu_idle_thread_init(cpu, tidle);
1292
1293	/*
1294	 * The platform might need to allocate resources prior to bringing
1295	 * up the CPU
1296	 */
1297	if (smp_ops->prepare_cpu) {
1298		rc = smp_ops->prepare_cpu(cpu);
1299		if (rc)
1300			return rc;
1301	}
1302
 1303	/* Make sure the callin-map entry is 0 (it can be left over
 1304	 * from a previous CPU hotplug).
1305	 */
1306	cpu_callin_map[cpu] = 0;
1307
1308	/* The information for processor bringup must
1309	 * be written out to main store before we release
1310	 * the processor.
1311	 */
1312	smp_mb();
1313
1314	/* wake up cpus */
1315	DBG("smp: kicking cpu %d\n", cpu);
1316	rc = smp_ops->kick_cpu(cpu);
1317	if (rc) {
1318		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1319		return rc;
1320	}
1321
1322	/*
1323	 * At boot time, simply spin on the callin word until the
1324	 * deadline passes.
1325	 *
1326	 * At run time, spin for an optimistic amount of time to avoid
1327	 * sleeping in the common case.
1328	 */
1329	deadline = jiffies + msecs_to_jiffies(spin_wait_ms);
1330	spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline));
1331
1332	if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) {
1333		const unsigned long sleep_interval_us = 10 * USEC_PER_MSEC;
1334		const unsigned long sleep_wait_ms = 100 * MSEC_PER_SEC;
1335
1336		deadline = jiffies + msecs_to_jiffies(sleep_wait_ms);
1337		while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline))
1338			fsleep(sleep_interval_us);
1339	}
1340
1341	if (!cpu_callin_map[cpu]) {
1342		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1343		return -ENOENT;
1344	}
1345
1346	DBG("Processor %u found.\n", cpu);
1347
1348	if (smp_ops->give_timebase)
1349		smp_ops->give_timebase();
1350
1351	/* Wait until cpu puts itself in the online & active maps */
1352	spin_until_cond(cpu_online(cpu));
1353
1354	return 0;
1355}
1356
1357/* Return the value of the reg property corresponding to the given
1358 * logical cpu.
1359 */
1360int cpu_to_core_id(int cpu)
1361{
1362	struct device_node *np;
1363	int id = -1;
1364
1365	np = of_get_cpu_node(cpu, NULL);
1366	if (!np)
1367		goto out;
1368
1369	id = of_get_cpu_hwid(np, 0);
1370out:
1371	of_node_put(np);
1372	return id;
1373}
1374EXPORT_SYMBOL_GPL(cpu_to_core_id);
1375
1376/* Helper routines for cpu to core mapping */
1377int cpu_core_index_of_thread(int cpu)
1378{
1379	return cpu >> threads_shift;
1380}
1381EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1382
1383int cpu_first_thread_of_core(int core)
1384{
1385	return core << threads_shift;
1386}
1387EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
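/*
 * Worked example (illustrative): assuming threads_shift == 3, i.e. 8 threads
 * per core, cpu_core_index_of_thread(19) == 19 >> 3 == 2 and
 * cpu_first_thread_of_core(2) == 2 << 3 == 16, so logical CPU 19 is thread 3
 * of the core whose first thread is CPU 16.
 */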
1388
1389/* Must be called when no change can occur to cpu_present_mask,
1390 * i.e. during cpu online or offline.
1391 */
1392static struct device_node *cpu_to_l2cache(int cpu)
1393{
1394	struct device_node *np;
1395	struct device_node *cache;
1396
1397	if (!cpu_present(cpu))
1398		return NULL;
1399
1400	np = of_get_cpu_node(cpu, NULL);
1401	if (np == NULL)
1402		return NULL;
1403
1404	cache = of_find_next_cache_node(np);
1405
1406	of_node_put(np);
1407
1408	return cache;
1409}
1410
1411static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
1412{
1413	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1414	struct device_node *l2_cache, *np;
1415	int i;
1416
1417	if (has_big_cores)
1418		submask_fn = cpu_smallcore_mask;
1419
1420	/*
1421	 * If the threads in a thread-group share L2 cache, then the
1422	 * L2-mask can be obtained from thread_group_l2_cache_map.
1423	 */
1424	if (thread_group_shares_l2) {
1425		cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
1426
1427		for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
1428			if (cpu_online(i))
1429				set_cpus_related(i, cpu, cpu_l2_cache_mask);
1430		}
1431
1432		/* Verify that L1-cache siblings are a subset of L2 cache-siblings */
1433		if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
1434		    !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
1435			pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
1436				     cpu);
1437		}
1438
1439		return true;
1440	}
1441
1442	l2_cache = cpu_to_l2cache(cpu);
1443	if (!l2_cache || !*mask) {
1444		/* Assume only core siblings share cache with this CPU */
1445		for_each_cpu(i, cpu_sibling_mask(cpu))
1446			set_cpus_related(cpu, i, cpu_l2_cache_mask);
1447
1448		return false;
1449	}
1450
1451	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1452
1453	/* Update l2-cache mask with all the CPUs that are part of submask */
1454	or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
1455
1456	/* Skip all CPUs already part of current CPU l2-cache mask */
1457	cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
1458
1459	for_each_cpu(i, *mask) {
1460		/*
 1461		 * When updating the masks, the current CPU has not yet been
 1462		 * marked online, but we still need to update the cache masks.
1463		 */
1464		np = cpu_to_l2cache(i);
1465
1466		/* Skip all CPUs already part of current CPU l2-cache */
1467		if (np == l2_cache) {
1468			or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
1469			cpumask_andnot(*mask, *mask, submask_fn(i));
1470		} else {
1471			cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
1472		}
1473
1474		of_node_put(np);
1475	}
1476	of_node_put(l2_cache);
1477
1478	return true;
1479}
1480
1481#ifdef CONFIG_HOTPLUG_CPU
1482static void remove_cpu_from_masks(int cpu)
1483{
1484	struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
1485	int i;
1486
1487	unmap_cpu_from_node(cpu);
1488
1489	if (shared_caches)
1490		mask_fn = cpu_l2_cache_mask;
1491
1492	for_each_cpu(i, mask_fn(cpu)) {
1493		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1494		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1495		if (has_big_cores)
1496			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1497	}
1498
1499	for_each_cpu(i, cpu_core_mask(cpu))
1500		set_cpus_unrelated(cpu, i, cpu_core_mask);
1501
1502	if (has_coregroup_support()) {
1503		for_each_cpu(i, cpu_coregroup_mask(cpu))
1504			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
1505	}
1506}
1507#endif
1508
1509static inline void add_cpu_to_smallcore_masks(int cpu)
1510{
1511	int i;
1512
1513	if (!has_big_cores)
1514		return;
1515
1516	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1517
1518	for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
1519		if (cpu_online(i))
1520			set_cpus_related(i, cpu, cpu_smallcore_mask);
1521	}
1522}
1523
1524static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
1525{
1526	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1527	int coregroup_id = cpu_to_coregroup_id(cpu);
1528	int i;
1529
1530	if (shared_caches)
1531		submask_fn = cpu_l2_cache_mask;
1532
1533	if (!*mask) {
1534		/* Assume only siblings are part of this CPU's coregroup */
1535		for_each_cpu(i, submask_fn(cpu))
1536			set_cpus_related(cpu, i, cpu_coregroup_mask);
1537
1538		return;
1539	}
1540
1541	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1542
1543	/* Update coregroup mask with all the CPUs that are part of submask */
1544	or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
1545
1546	/* Skip all CPUs already part of coregroup mask */
1547	cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
1548
1549	for_each_cpu(i, *mask) {
1550		/* Skip all CPUs not part of this coregroup */
1551		if (coregroup_id == cpu_to_coregroup_id(i)) {
1552			or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
1553			cpumask_andnot(*mask, *mask, submask_fn(i));
1554		} else {
1555			cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
1556		}
1557	}
1558}
1559
1560static void add_cpu_to_masks(int cpu)
1561{
1562	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1563	int first_thread = cpu_first_thread_sibling(cpu);
1564	cpumask_var_t mask;
1565	int chip_id = -1;
1566	bool ret;
1567	int i;
1568
1569	/*
1570	 * This CPU will not be in the online mask yet so we need to manually
1571	 * add it to its own thread sibling mask.
1572	 */
1573	map_cpu_to_node(cpu, cpu_to_node(cpu));
1574	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1575	cpumask_set_cpu(cpu, cpu_core_mask(cpu));
1576
1577	for (i = first_thread; i < first_thread + threads_per_core; i++)
1578		if (cpu_online(i))
1579			set_cpus_related(i, cpu, cpu_sibling_mask);
1580
1581	add_cpu_to_smallcore_masks(cpu);
1582
1583	/* In CPU-hotplug path, hence use GFP_ATOMIC */
1584	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
1585	update_mask_by_l2(cpu, &mask);
1586
1587	if (has_coregroup_support())
1588		update_coregroup_mask(cpu, &mask);
1589
1590	if (chip_id_lookup_table && ret)
1591		chip_id = cpu_to_chip_id(cpu);
1592
1593	if (shared_caches)
1594		submask_fn = cpu_l2_cache_mask;
1595
1596	/* Update core_mask with all the CPUs that are part of submask */
1597	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
1598
1599	/* Skip all CPUs already part of current CPU core mask */
1600	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
1601
 1602	/* If chip_id is -1, limit the cpu_core_mask to within PKG */
1603	if (chip_id == -1)
1604		cpumask_and(mask, mask, cpu_cpu_mask(cpu));
1605
1606	for_each_cpu(i, mask) {
1607		if (chip_id == cpu_to_chip_id(i)) {
1608			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
1609			cpumask_andnot(mask, mask, submask_fn(i));
1610		} else {
1611			cpumask_andnot(mask, mask, cpu_core_mask(i));
1612		}
1613	}
1614
1615	free_cpumask_var(mask);
1616}
1617
1618/* Activate a secondary processor. */
1619__no_stack_protector
1620void start_secondary(void *unused)
1621{
1622	unsigned int cpu = raw_smp_processor_id();
1623
1624	/* PPC64 calls setup_kup() in early_setup_secondary() */
1625	if (IS_ENABLED(CONFIG_PPC32))
1626		setup_kup();
1627
1628	mmgrab_lazy_tlb(&init_mm);
1629	current->active_mm = &init_mm;
1630	VM_WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(&init_mm)));
1631	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
1632	inc_mm_active_cpus(&init_mm);
1633
1634	smp_store_cpu_info(cpu);
1635	set_dec(tb_ticks_per_jiffy);
1636	rcutree_report_cpu_starting(cpu);
1637	cpu_callin_map[cpu] = 1;
1638
1639	if (smp_ops->setup_cpu)
1640		smp_ops->setup_cpu(cpu);
1641	if (smp_ops->take_timebase)
1642		smp_ops->take_timebase();
1643
1644	secondary_cpu_time_init();
1645
1646#ifdef CONFIG_PPC64_PROC_SYSTEMCFG
1647	if (system_state == SYSTEM_RUNNING)
1648		systemcfg->processorCount++;
1649#endif
1650
1651#ifdef CONFIG_PPC64
1652	vdso_getcpu_init();
1653#endif
1654	set_numa_node(numa_cpu_lookup_table[cpu]);
1655	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1656
1657	/* Update topology CPU masks */
1658	add_cpu_to_masks(cpu);
1659
1660	/*
1661	 * Check for any shared caches. Note that this must be done on a
1662	 * per-core basis because one core in the pair might be disabled.
1663	 */
1664	if (!shared_caches) {
1665		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1666		struct cpumask *mask = cpu_l2_cache_mask(cpu);
1667
1668		if (has_big_cores)
1669			sibling_mask = cpu_smallcore_mask;
1670
1671		if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
1672			shared_caches = true;
1673	}
1674
1675	smp_wmb();
1676	notify_cpu_starting(cpu);
1677	set_cpu_online(cpu, true);
1678
1679	boot_init_stack_canary();
1680
1681	local_irq_enable();
1682
1683	/* We can enable ftrace for secondary cpus now */
1684	this_cpu_enable_ftrace();
1685
1686	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1687
1688	BUG();
1689}
1690
1691static struct sched_domain_topology_level powerpc_topology[6];
1692
1693static void __init build_sched_topology(void)
1694{
1695	int i = 0;
1696
1697	if (is_shared_processor() && has_big_cores)
1698		static_branch_enable(&splpar_asym_pack);
1699
1700#ifdef CONFIG_SCHED_SMT
1701	if (has_big_cores) {
1702		pr_info("Big cores detected but using small core scheduling\n");
1703		powerpc_topology[i++] = (struct sched_domain_topology_level){
1704			smallcore_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
1705		};
1706	} else {
1707		powerpc_topology[i++] = (struct sched_domain_topology_level){
1708			cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
1709		};
1710	}
1711#endif
1712	if (shared_caches) {
1713		powerpc_topology[i++] = (struct sched_domain_topology_level){
1714			shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE)
1715		};
1716	}
1717	if (has_coregroup_support()) {
1718		powerpc_topology[i++] = (struct sched_domain_topology_level){
1719			cpu_mc_mask, powerpc_shared_proc_flags, SD_INIT_NAME(MC)
1720		};
1721	}
1722	powerpc_topology[i++] = (struct sched_domain_topology_level){
1723		cpu_cpu_mask, powerpc_shared_proc_flags, SD_INIT_NAME(PKG)
1724	};
1725
1726	/* There must be one trailing NULL entry left.  */
1727	BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1);
1728
1729	set_sched_topology(powerpc_topology);
1730}
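/*
 * Illustrative note: on a system where all of the optional levels above are
 * present (SMT scheduling, shared L2 caches and coregroup support), the
 * sched-domain hierarchy built here is SMT -> CACHE -> MC -> PKG; levels
 * whose preconditions are not met are simply left out.
 */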
1731
1732void __init smp_cpus_done(unsigned int max_cpus)
1733{
1734	/*
1735	 * We are running pinned to the boot CPU, see rest_init().
1736	 */
1737	if (smp_ops && smp_ops->setup_cpu)
1738		smp_ops->setup_cpu(boot_cpuid);
1739
1740	if (smp_ops && smp_ops->bringup_done)
1741		smp_ops->bringup_done();
1742
1743	dump_numa_cpu_topology();
1744	build_sched_topology();
1745}
1746
1747/*
1748 * For asym packing, by default lower numbered CPU has higher priority.
 1749 * On shared processors, pack to the lower numbered core. However, avoid moving
1750 * between thread_groups within the same core.
1751 */
1752int arch_asym_cpu_priority(int cpu)
1753{
1754	if (static_branch_unlikely(&splpar_asym_pack))
1755		return -cpu / threads_per_core;
1756
1757	return -cpu;
1758}
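/*
 * Worked example (illustrative): with threads_per_core == 8 on a shared
 * processor LPAR (splpar_asym_pack enabled), CPUs 0..7 all report priority 0
 * and CPUs 8..15 all report -1, so asym packing prefers the lower numbered
 * core without distinguishing between threads of the same core. Otherwise
 * the priority is simply -cpu, preferring lower numbered CPUs outright.
 */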
1759
1760#ifdef CONFIG_HOTPLUG_CPU
1761int __cpu_disable(void)
1762{
1763	int cpu = smp_processor_id();
1764	int err;
1765
1766	if (!smp_ops->cpu_disable)
1767		return -ENOSYS;
1768
1769	this_cpu_disable_ftrace();
1770
1771	err = smp_ops->cpu_disable();
1772	if (err)
1773		return err;
1774
1775	/* Update sibling maps */
1776	remove_cpu_from_masks(cpu);
1777
1778	return 0;
1779}
1780
1781void __cpu_die(unsigned int cpu)
1782{
1783	/*
 1784	 * This could perhaps be a generic call in idlea_task_dead(), but
 1785	 * that requires testing on all archs, so keep it here for now.
1786	 */
1787	VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(&init_mm)));
1788	dec_mm_active_cpus(&init_mm);
1789	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
1790
1791	if (smp_ops->cpu_die)
1792		smp_ops->cpu_die(cpu);
1793}
1794
1795void __noreturn arch_cpu_idle_dead(void)
1796{
1797	/*
 1798	 * Disable ftrace on the down path. This will be re-enabled by
 1799	 * start_secondary() via start_secondary_resume() below.
1800	 */
1801	this_cpu_disable_ftrace();
1802
1803	if (smp_ops->cpu_offline_self)
1804		smp_ops->cpu_offline_self();
1805
1806	/* If we return, we re-enter start_secondary */
1807	start_secondary_resume();
1808}
1809
1810#endif