v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * SMP support for ppc.
   4 *
   5 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
   6 * deal of code from the sparc and intel versions.
   7 *
   8 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
   9 *
  10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
  11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
  12 */
  13
  14#undef DEBUG
  15
  16#include <linux/kernel.h>
  17#include <linux/export.h>
  18#include <linux/sched/mm.h>
  19#include <linux/sched/task_stack.h>
  20#include <linux/sched/topology.h>
  21#include <linux/smp.h>
  22#include <linux/interrupt.h>
  23#include <linux/delay.h>
  24#include <linux/init.h>
  25#include <linux/spinlock.h>
  26#include <linux/cache.h>
  27#include <linux/err.h>
  28#include <linux/device.h>
  29#include <linux/cpu.h>
  30#include <linux/notifier.h>
  31#include <linux/topology.h>
  32#include <linux/profile.h>
  33#include <linux/processor.h>
  34#include <linux/random.h>
  35#include <linux/stackprotector.h>
  36#include <linux/pgtable.h>
  37#include <linux/clockchips.h>
  38#include <linux/kexec.h>
  39
  40#include <asm/ptrace.h>
  41#include <linux/atomic.h>
  42#include <asm/irq.h>
  43#include <asm/hw_irq.h>
  44#include <asm/kvm_ppc.h>
  45#include <asm/dbell.h>
  46#include <asm/page.h>
  47#include <asm/smp.h>
  48#include <asm/time.h>
  49#include <asm/machdep.h>
  50#include <asm/cputhreads.h>
  51#include <asm/cputable.h>
  52#include <asm/mpic.h>
  53#include <asm/vdso_datapage.h>
  54#ifdef CONFIG_PPC64
  55#include <asm/paca.h>
  56#endif
  57#include <asm/vdso.h>
  58#include <asm/debug.h>
  59#include <asm/cpu_has_feature.h>
  60#include <asm/ftrace.h>
  61#include <asm/kup.h>
  62#include <asm/fadump.h>
  63
  64#ifdef DEBUG
  65#include <asm/udbg.h>
  66#define DBG(fmt...) udbg_printf(fmt)
  67#else
  68#define DBG(fmt...)
  69#endif
  70
  71#ifdef CONFIG_HOTPLUG_CPU
  72/* State of each CPU during hotplug phases */
  73static DEFINE_PER_CPU(int, cpu_state) = { 0 };
  74#endif
  75
  76struct task_struct *secondary_current;
  77bool has_big_cores;
  78bool coregroup_enabled;
  79bool thread_group_shares_l2;
  80bool thread_group_shares_l3;
  81
  82DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
  83DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
  84DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
  85DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
  86static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
  87
  88EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  89EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
  90EXPORT_PER_CPU_SYMBOL(cpu_core_map);
  91EXPORT_SYMBOL_GPL(has_big_cores);
  92
  93enum {
  94#ifdef CONFIG_SCHED_SMT
  95	smt_idx,
  96#endif
  97	cache_idx,
  98	mc_idx,
  99	die_idx,
 100};
 101
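/*
 * Possible values of the "property" field of an "ibm,thread-groups" entry:
 * 1 means the threads in a group share the L1/translation cache, 2 means
 * they share the L2 (and, on P10, also the L3) cache.
 */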
 102#define MAX_THREAD_LIST_SIZE	8
 103#define THREAD_GROUP_SHARE_L1   1
 104#define THREAD_GROUP_SHARE_L2_L3 2
 105struct thread_groups {
 106	unsigned int property;
 107	unsigned int nr_groups;
 108	unsigned int threads_per_group;
 109	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
 110};
 111
 112/* Maximum number of properties that groups of threads within a core can share */
 113#define MAX_THREAD_GROUP_PROPERTIES 2
 114
 115struct thread_groups_list {
 116	unsigned int nr_properties;
 117	struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
 118};
 119
 120static struct thread_groups_list tgl[NR_CPUS] __initdata;
 121/*
  122 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
  123 * the set of its siblings that share the L1-cache.
 124 */
 125DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
 126
 127/*
  128 * On some big-core systems, thread_group_l2_cache_map for each CPU
  129 * corresponds to the set of its siblings within the core that share the
 130 * L2-cache.
 131 */
 132DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
 133
 134/*
 135 * On P10, thread_group_l3_cache_map for each CPU is equal to the
 136 * thread_group_l2_cache_map
 137 */
 138DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
 139
 140/* SMP operations for this machine */
 141struct smp_ops_t *smp_ops;
 142
 143/* Can't be static due to PowerMac hackery */
 144volatile unsigned int cpu_callin_map[NR_CPUS];
 145
 146int smt_enabled_at_boot = 1;
 147
 148/*
 149 * Returns 1 if the specified cpu should be brought up during boot.
 150 * Used to inhibit booting threads if they've been disabled or
 151 * limited on the command line
 152 */
 153int smp_generic_cpu_bootable(unsigned int nr)
 154{
 155	/* Special case - we inhibit secondary thread startup
 156	 * during boot if the user requests it.
 157	 */
 158	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
 159		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
 160			return 0;
 161		if (smt_enabled_at_boot
 162		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
 163			return 0;
 164	}
 165
 166	return 1;
 167}
 168
 169
 170#ifdef CONFIG_PPC64
 171int smp_generic_kick_cpu(int nr)
 172{
 173	if (nr < 0 || nr >= nr_cpu_ids)
 174		return -EINVAL;
 175
 176	/*
 177	 * The processor is currently spinning, waiting for the
  178	 * cpu_start field to become non-zero. After we set cpu_start,
 179	 * the processor will continue on to secondary_start
 180	 */
 181	if (!paca_ptrs[nr]->cpu_start) {
 182		paca_ptrs[nr]->cpu_start = 1;
 183		smp_mb();
 184		return 0;
 185	}
 186
 187#ifdef CONFIG_HOTPLUG_CPU
 188	/*
 189	 * Ok it's not there, so it might be soft-unplugged, let's
 190	 * try to bring it back
 191	 */
 192	generic_set_cpu_up(nr);
 193	smp_wmb();
 194	smp_send_reschedule(nr);
 195#endif /* CONFIG_HOTPLUG_CPU */
 196
 197	return 0;
 198}
 199#endif /* CONFIG_PPC64 */
 200
 201static irqreturn_t call_function_action(int irq, void *data)
 202{
 203	generic_smp_call_function_interrupt();
 204	return IRQ_HANDLED;
 205}
 206
 207static irqreturn_t reschedule_action(int irq, void *data)
 208{
 209	scheduler_ipi();
 210	return IRQ_HANDLED;
 211}
 212
 213#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 214static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
 215{
 216	timer_broadcast_interrupt();
 217	return IRQ_HANDLED;
 218}
 219#endif
 220
 221#ifdef CONFIG_NMI_IPI
 222static irqreturn_t nmi_ipi_action(int irq, void *data)
 223{
 224	smp_handle_nmi_ipi(get_irq_regs());
 225	return IRQ_HANDLED;
 226}
 227#endif
 228
 229static irq_handler_t smp_ipi_action[] = {
 230	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
 231	[PPC_MSG_RESCHEDULE] = reschedule_action,
 232#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 233	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
 234#endif
 235#ifdef CONFIG_NMI_IPI
 236	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
 237#endif
 238};
 239
 240/*
 241 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 242 * than going through the call function infrastructure, and strongly
 243 * serialized, so it is more appropriate for debugging.
 244 */
 245const char *smp_ipi_name[] = {
 246	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
 247	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
 248#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 249	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
 250#endif
 251#ifdef CONFIG_NMI_IPI
 252	[PPC_MSG_NMI_IPI] = "nmi ipi",
 253#endif
 254};
 255
 256/* optional function to request ipi, for controllers with >= 4 ipis */
 257int smp_request_message_ipi(int virq, int msg)
 258{
 259	int err;
 260
 261	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
 262		return -EINVAL;
 263#ifndef CONFIG_NMI_IPI
 264	if (msg == PPC_MSG_NMI_IPI)
 265		return 1;
 266#endif
 267
 268	err = request_irq(virq, smp_ipi_action[msg],
 269			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
 270			  smp_ipi_name[msg], NULL);
 271	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
 272		virq, smp_ipi_name[msg], err);
 273
 274	return err;
 275}
 276
 277#ifdef CONFIG_PPC_SMP_MUXED_IPI
 278struct cpu_messages {
 279	long messages;			/* current messages */
 280};
 281static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
 282
 283void smp_muxed_ipi_set_message(int cpu, int msg)
 284{
 285	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 286	char *message = (char *)&info->messages;
 287
 288	/*
 289	 * Order previous accesses before accesses in the IPI handler.
 290	 */
 291	smp_mb();
 292	message[msg] = 1;
 293}
 294
 295void smp_muxed_ipi_message_pass(int cpu, int msg)
 296{
 297	smp_muxed_ipi_set_message(cpu, msg);
 298
 299	/*
 300	 * cause_ipi functions are required to include a full barrier
 301	 * before doing whatever causes the IPI.
 302	 */
 303	smp_ops->cause_ipi(cpu);
 304}
 305
 306#ifdef __BIG_ENDIAN__
 307#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
 308#else
 309#define IPI_MESSAGE(A) (1uL << (8 * (A)))
 310#endif
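/*
 * Each message type owns one byte of cpu_messages::messages, so
 * smp_muxed_ipi_set_message() can raise a message with a single byte store
 * and smp_ipi_demux_relaxed() can test it against the masks built here.
 */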
 311
 312irqreturn_t smp_ipi_demux(void)
 313{
 314	mb();	/* order any irq clear */
 315
 316	return smp_ipi_demux_relaxed();
 317}
 318
 319/* sync-free variant. Callers should ensure synchronization */
 320irqreturn_t smp_ipi_demux_relaxed(void)
 321{
 322	struct cpu_messages *info;
 323	unsigned long all;
 324
 325	info = this_cpu_ptr(&ipi_message);
 326	do {
 327		all = xchg(&info->messages, 0);
 328#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
 329		/*
 330		 * Must check for PPC_MSG_RM_HOST_ACTION messages
 331		 * before PPC_MSG_CALL_FUNCTION messages because when
 332		 * a VM is destroyed, we call kick_all_cpus_sync()
 333		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
 334		 * messages have completed before we free any VCPUs.
 335		 */
 336		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
 337			kvmppc_xics_ipi_action();
 338#endif
 339		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
 340			generic_smp_call_function_interrupt();
 341		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
 342			scheduler_ipi();
 343#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 344		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
 345			timer_broadcast_interrupt();
 346#endif
 347#ifdef CONFIG_NMI_IPI
 348		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
 349			nmi_ipi_action(0, NULL);
 350#endif
 351	} while (info->messages);
 352
 353	return IRQ_HANDLED;
 354}
 355#endif /* CONFIG_PPC_SMP_MUXED_IPI */
 356
 357static inline void do_message_pass(int cpu, int msg)
 358{
 359	if (smp_ops->message_pass)
 360		smp_ops->message_pass(cpu, msg);
 361#ifdef CONFIG_PPC_SMP_MUXED_IPI
 362	else
 363		smp_muxed_ipi_message_pass(cpu, msg);
 364#endif
 365}
 366
 367void smp_send_reschedule(int cpu)
 368{
 369	if (likely(smp_ops))
 370		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 371}
 372EXPORT_SYMBOL_GPL(smp_send_reschedule);
 373
 374void arch_send_call_function_single_ipi(int cpu)
 375{
 376	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 377}
 378
 379void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 380{
 381	unsigned int cpu;
 382
 383	for_each_cpu(cpu, mask)
 384		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 385}
 386
 387#ifdef CONFIG_NMI_IPI
 388
 389/*
 390 * "NMI IPI" system.
 391 *
 392 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 393 * a running system. They can be used for crash, debug, halt/reboot, etc.
 394 *
 395 * The IPI call waits with interrupts disabled until all targets enter the
 396 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 397 * have returned from their handlers, so there is no guarantee about
 398 * concurrency or re-entrancy.
 399 *
 400 * A new NMI can be issued before all targets exit the handler.
 401 *
 402 * The IPI call may time out without all targets entering the NMI handler.
 403 * In that case, there is some logic to recover (and ignore subsequent
 404 * NMI interrupts that may eventually be raised), but the platform interrupt
 405 * handler may not be able to distinguish this from other exception causes,
 406 * which may cause a crash.
 407 */
 408
 409static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
 410static struct cpumask nmi_ipi_pending_mask;
 411static bool nmi_ipi_busy = false;
 412static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
 413
 414noinstr static void nmi_ipi_lock_start(unsigned long *flags)
 415{
 416	raw_local_irq_save(*flags);
 417	hard_irq_disable();
 418	while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 419		raw_local_irq_restore(*flags);
 420		spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
 421		raw_local_irq_save(*flags);
 422		hard_irq_disable();
 423	}
 424}
 425
 426noinstr static void nmi_ipi_lock(void)
 427{
 428	while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
 429		spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
 430}
 431
 432noinstr static void nmi_ipi_unlock(void)
 433{
 434	smp_mb();
 435	WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1);
 436	arch_atomic_set(&__nmi_ipi_lock, 0);
 437}
 438
 439noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
 440{
 441	nmi_ipi_unlock();
 442	raw_local_irq_restore(*flags);
 443}
 444
 445/*
 446 * Platform NMI handler calls this to ack
 447 */
 448noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
 449{
 450	void (*fn)(struct pt_regs *) = NULL;
 451	unsigned long flags;
 452	int me = raw_smp_processor_id();
 453	int ret = 0;
 454
 455	/*
 456	 * Unexpected NMIs are possible here because the interrupt may not
 457	 * be able to distinguish NMI IPIs from other types of NMIs, or
 458	 * because the caller may have timed out.
 459	 */
 460	nmi_ipi_lock_start(&flags);
 461	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
 462		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 463		fn = READ_ONCE(nmi_ipi_function);
 464		WARN_ON_ONCE(!fn);
 465		ret = 1;
 466	}
 467	nmi_ipi_unlock_end(&flags);
 468
 469	if (fn)
 470		fn(regs);
 471
 472	return ret;
 473}
 474
 475static void do_smp_send_nmi_ipi(int cpu, bool safe)
 476{
 477	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
 478		return;
 479
 480	if (cpu >= 0) {
 481		do_message_pass(cpu, PPC_MSG_NMI_IPI);
 482	} else {
 483		int c;
 484
 485		for_each_online_cpu(c) {
 486			if (c == raw_smp_processor_id())
 487				continue;
 488			do_message_pass(c, PPC_MSG_NMI_IPI);
 489		}
 490	}
 491}
 492
 493/*
 494 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 495 * - fn is the target callback function.
 496 * - delay_us > 0 is the delay before giving up waiting for targets to
 497 *   begin executing the handler, == 0 specifies indefinite delay.
 498 */
 499static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
 500				u64 delay_us, bool safe)
 501{
 502	unsigned long flags;
 503	int me = raw_smp_processor_id();
 504	int ret = 1;
 505
 506	BUG_ON(cpu == me);
 507	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
 508
 509	if (unlikely(!smp_ops))
 510		return 0;
 511
 512	nmi_ipi_lock_start(&flags);
 513	while (nmi_ipi_busy) {
 514		nmi_ipi_unlock_end(&flags);
 515		spin_until_cond(!nmi_ipi_busy);
 516		nmi_ipi_lock_start(&flags);
 517	}
 518	nmi_ipi_busy = true;
 519	nmi_ipi_function = fn;
 520
 521	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
 522
 523	if (cpu < 0) {
 524		/* ALL_OTHERS */
 525		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
 526		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 527	} else {
 528		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
 529	}
 530
 531	nmi_ipi_unlock();
 532
 533	/* Interrupts remain hard disabled */
 534
 535	do_smp_send_nmi_ipi(cpu, safe);
 536
 537	nmi_ipi_lock();
 538	/* nmi_ipi_busy is set here, so unlock/lock is okay */
 539	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
 540		nmi_ipi_unlock();
 541		udelay(1);
 542		nmi_ipi_lock();
 543		if (delay_us) {
 544			delay_us--;
 545			if (!delay_us)
 546				break;
 547		}
 548	}
 549
 550	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
 551		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
 552		ret = 0;
 553		cpumask_clear(&nmi_ipi_pending_mask);
 554	}
 555
 556	nmi_ipi_function = NULL;
 557	nmi_ipi_busy = false;
 558
 559	nmi_ipi_unlock_end(&flags);
 560
 561	return ret;
 562}
 563
 564int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 565{
 566	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
 567}
 568
 569int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 570{
 571	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
 572}
 573#endif /* CONFIG_NMI_IPI */
 574
 575#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 576void tick_broadcast(const struct cpumask *mask)
 577{
 578	unsigned int cpu;
 579
 580	for_each_cpu(cpu, mask)
 581		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
 582}
 583#endif
 584
 585#ifdef CONFIG_DEBUGGER
 586static void debugger_ipi_callback(struct pt_regs *regs)
 587{
 588	debugger_ipi(regs);
 589}
 590
 591void smp_send_debugger_break(void)
 592{
 593	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
 594}
 595#endif
 596
 597#ifdef CONFIG_KEXEC_CORE
 598void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 599{
 600	int cpu;
 601
 602	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
 603	if (kdump_in_progress() && crash_wake_offline) {
 604		for_each_present_cpu(cpu) {
 605			if (cpu_online(cpu))
 606				continue;
 607			/*
 608			 * crash_ipi_callback will wait for
 609			 * all cpus, including offline CPUs.
 610			 * We don't care about nmi_ipi_function.
 611			 * Offline cpus will jump straight into
 612			 * crash_ipi_callback, we can skip the
 613			 * entire NMI dance and waiting for
 614			 * cpus to clear pending mask, etc.
 615			 */
 616			do_smp_send_nmi_ipi(cpu, false);
 617		}
 618	}
 619}
 620#endif
 621
 622void crash_smp_send_stop(void)
 623{
 624	static bool stopped = false;
 625
 626	/*
 627	 * In case of fadump, register data for all CPUs is captured by f/w
 628	 * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before
 629	 * this rtas call to avoid tricky post processing of those CPUs'
 630	 * backtraces.
 631	 */
 632	if (should_fadump_crash())
 633		return;
 634
 635	if (stopped)
 636		return;
 637
 638	stopped = true;
 639
 640#ifdef CONFIG_KEXEC_CORE
 641	if (kexec_crash_image) {
 642		crash_kexec_prepare();
 643		return;
 644	}
 645#endif
 646
 647	smp_send_stop();
 648}
 649
 650#ifdef CONFIG_NMI_IPI
 651static void nmi_stop_this_cpu(struct pt_regs *regs)
 652{
 653	/*
 654	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
 655	 */
 656	set_cpu_online(smp_processor_id(), false);
 657
 658	spin_begin();
 659	while (1)
 660		spin_cpu_relax();
 661}
 662
 663void smp_send_stop(void)
 664{
 665	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
 666}
 667
 668#else /* CONFIG_NMI_IPI */
 669
 670static void stop_this_cpu(void *dummy)
 671{
 672	hard_irq_disable();
 673
 674	/*
 675	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings,
 676	 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
 677	 * to know other CPUs are offline before it breaks locks to flush
 678	 * printk buffers, in case we panic()ed while holding the lock.
 679	 */
 680	set_cpu_online(smp_processor_id(), false);
 681
 682	spin_begin();
 683	while (1)
 684		spin_cpu_relax();
 685}
 686
 687void smp_send_stop(void)
 688{
 689	static bool stopped = false;
 690
 691	/*
 692	 * Prevent waiting on csd lock from a previous smp_send_stop.
 693	 * This is racy, but in general callers try to do the right
 694	 * thing and only fire off one smp_send_stop (e.g., see
 695	 * kernel/panic.c)
 696	 */
 697	if (stopped)
 698		return;
 699
 700	stopped = true;
 701
 702	smp_call_function(stop_this_cpu, NULL, 0);
 703}
 704#endif /* CONFIG_NMI_IPI */
 705
 706static struct task_struct *current_set[NR_CPUS];
 707
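/* Record this CPU's PVR (and, on e500, next_tlbcam_idx) in per-cpu data */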
 708static void smp_store_cpu_info(int id)
 709{
 710	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 711#ifdef CONFIG_PPC_E500
 712	per_cpu(next_tlbcam_idx, id)
 713		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
 714#endif
 715}
 716
 717/*
 718 * Relationships between CPUs are maintained in a set of per-cpu cpumasks so
 719 * rather than just passing around the cpumask we pass around a function that
  720 * returns that cpumask for the given CPU.
 721 */
 722static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
 723{
 724	cpumask_set_cpu(i, get_cpumask(j));
 725	cpumask_set_cpu(j, get_cpumask(i));
 726}
 727
 728#ifdef CONFIG_HOTPLUG_CPU
 729static void set_cpus_unrelated(int i, int j,
 730		struct cpumask *(*get_cpumask)(int))
 731{
 732	cpumask_clear_cpu(i, get_cpumask(j));
 733	cpumask_clear_cpu(j, get_cpumask(i));
 734}
 735#endif
 736
 737/*
 738 * Extends set_cpus_related. Instead of setting one CPU at a time in
  739 * dstmask, set srcmask in one shot. dstmask should be a superset of srcmask.
 740 */
 741static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
 742				struct cpumask *(*dstmask)(int))
 743{
 744	struct cpumask *mask;
 745	int k;
 746
 747	mask = srcmask(j);
 748	for_each_cpu(k, srcmask(i))
 749		cpumask_or(dstmask(k), dstmask(k), mask);
 750
 751	if (i == j)
 752		return;
 753
 754	mask = srcmask(i);
 755	for_each_cpu(k, srcmask(j))
 756		cpumask_or(dstmask(k), dstmask(k), mask);
 757}
 758
 759/*
 760 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 761 *                      property for the CPU device node @dn and stores
 762 *                      the parsed output in the thread_groups_list
 763 *                      structure @tglp.
 764 *
 765 * @dn: The device node of the CPU device.
 766 * @tglp: Pointer to a thread group list structure into which the parsed
 767 *      output of "ibm,thread-groups" is stored.
 768 *
 769 * ibm,thread-groups[0..N-1] array defines which group of threads in
 770 * the CPU-device node can be grouped together based on the property.
 771 *
 772 * This array can represent thread groupings for multiple properties.
 773 *
 774 * ibm,thread-groups[i + 0] tells us the property based on which the
 775 * threads are being grouped together. If this value is 1, it implies
  776 * that the threads in the same group share the L1 and translation cache. If
 777 * the value is 2, it implies that the threads in the same group share
 778 * the same L2 cache.
 779 *
 780 * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
 781 * property ibm,thread-groups[i]
 782 *
 783 * ibm,thread-groups[i+2] tells us the number of threads in each such
 784 * group.
 785 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
 786 *
  787 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
 788 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 789 * the grouping.
 790 *
 791 * Example:
 792 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
  793 * This can be decomposed into two consecutive arrays:
 794 * a) [1,2,4,8,10,12,14,9,11,13,15]
 795 * b) [2,2,4,8,10,12,14,9,11,13,15]
 796 *
 797 * where in,
 798 *
 799 * a) provides information of Property "1" being shared by "2" groups,
  800 *  each with "4" threads. The "ibm,ppc-interrupt-server#s" of
 801 *  the first group is {8,10,12,14} and the
 802 *  "ibm,ppc-interrupt-server#s" of the second group is
  803 *  {9,11,13,15}. Property "1" indicates that the threads in the
  804 *  group share the L1 cache, translation cache and instruction data
  805 *  flow.
 806 *
 807 * b) provides information of Property "2" being shared by "2" groups,
 808 *  each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
 809 *  the first group is {8,10,12,14} and the
 810 *  "ibm,ppc-interrupt-server#s" of the second group is
 811 *  {9,11,13,15}. Property "2" indicates that the threads in each
 812 *  group share the L2-cache.
 813 *
 814 * Returns 0 on success, -EINVAL if the property does not exist,
 815 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 816 * property data isn't large enough.
 817 */
 818static int parse_thread_groups(struct device_node *dn,
 819			       struct thread_groups_list *tglp)
 820{
 821	unsigned int property_idx = 0;
 822	u32 *thread_group_array;
 823	size_t total_threads;
 824	int ret = 0, count;
 825	u32 *thread_list;
 826	int i = 0;
 827
 828	count = of_property_count_u32_elems(dn, "ibm,thread-groups");
 829	thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
 830	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
 831					 thread_group_array, count);
 832	if (ret)
 833		goto out_free;
 834
 835	while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
 836		int j;
 837		struct thread_groups *tg = &tglp->property_tgs[property_idx++];
 838
 839		tg->property = thread_group_array[i];
 840		tg->nr_groups = thread_group_array[i + 1];
 841		tg->threads_per_group = thread_group_array[i + 2];
 842		total_threads = tg->nr_groups * tg->threads_per_group;
 843
 844		thread_list = &thread_group_array[i + 3];
 845
 846		for (j = 0; j < total_threads; j++)
 847			tg->thread_list[j] = thread_list[j];
 848		i = i + 3 + total_threads;
 849	}
 850
 851	tglp->nr_properties = property_idx;
 852
 853out_free:
 854	kfree(thread_group_array);
 855	return ret;
 856}
 857
 858/*
  859 * get_cpu_thread_group_start : Searches tg->thread_list for the thread
  860 *                              group that @cpu belongs to.
 861 *
 862 * @cpu : The logical CPU whose thread group is being searched.
 863 * @tg : The thread-group structure of the CPU node which @cpu belongs
 864 *       to.
 865 *
  866 * Returns the index into tg->thread_list that points to the start
 867 * of the thread_group that @cpu belongs to.
 868 *
 869 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 870 * tg->thread_list.
 871 */
 872static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
 873{
 874	int hw_cpu_id = get_hard_smp_processor_id(cpu);
 875	int i, j;
 876
 877	for (i = 0; i < tg->nr_groups; i++) {
 878		int group_start = i * tg->threads_per_group;
 879
 880		for (j = 0; j < tg->threads_per_group; j++) {
 881			int idx = group_start + j;
 882
 883			if (tg->thread_list[idx] == hw_cpu_id)
 884				return group_start;
 885		}
 886	}
 887
 888	return -1;
 889}
 890
 891static struct thread_groups *__init get_thread_groups(int cpu,
 892						      int group_property,
 893						      int *err)
 894{
 895	struct device_node *dn = of_get_cpu_node(cpu, NULL);
 896	struct thread_groups_list *cpu_tgl = &tgl[cpu];
 897	struct thread_groups *tg = NULL;
 898	int i;
 899	*err = 0;
 900
 901	if (!dn) {
 902		*err = -ENODATA;
 903		return NULL;
 904	}
 905
 906	if (!cpu_tgl->nr_properties) {
 907		*err = parse_thread_groups(dn, cpu_tgl);
 908		if (*err)
 909			goto out;
 910	}
 911
 912	for (i = 0; i < cpu_tgl->nr_properties; i++) {
 913		if (cpu_tgl->property_tgs[i].property == group_property) {
 914			tg = &cpu_tgl->property_tgs[i];
 915			break;
 916		}
 917	}
 918
 919	if (!tg)
 920		*err = -EINVAL;
 921out:
 922	of_node_put(dn);
 923	return tg;
 924}
 925
 926static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg,
 927					       int cpu, int cpu_group_start)
 928{
 929	int first_thread = cpu_first_thread_sibling(cpu);
 930	int i;
 931
 932	zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
 933
 934	for (i = first_thread; i < first_thread + threads_per_core; i++) {
 935		int i_group_start = get_cpu_thread_group_start(i, tg);
 936
 937		if (unlikely(i_group_start == -1)) {
 938			WARN_ON_ONCE(1);
 939			return -ENODATA;
 940		}
 941
 942		if (i_group_start == cpu_group_start)
 943			cpumask_set_cpu(i, *mask);
 944	}
 945
 946	return 0;
 947}
 948
 949static int __init init_thread_group_cache_map(int cpu, int cache_property)
 950
 951{
 952	int cpu_group_start = -1, err = 0;
 953	struct thread_groups *tg = NULL;
 954	cpumask_var_t *mask = NULL;
 955
 956	if (cache_property != THREAD_GROUP_SHARE_L1 &&
 957	    cache_property != THREAD_GROUP_SHARE_L2_L3)
 958		return -EINVAL;
 959
 960	tg = get_thread_groups(cpu, cache_property, &err);
 961
 962	if (!tg)
 963		return err;
 964
 965	cpu_group_start = get_cpu_thread_group_start(cpu, tg);
 966
 967	if (unlikely(cpu_group_start == -1)) {
 968		WARN_ON_ONCE(1);
 969		return -ENODATA;
 970	}
 971
 972	if (cache_property == THREAD_GROUP_SHARE_L1) {
 973		mask = &per_cpu(thread_group_l1_cache_map, cpu);
 974		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
 975	}
 976	else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
 977		mask = &per_cpu(thread_group_l2_cache_map, cpu);
 978		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
 979		mask = &per_cpu(thread_group_l3_cache_map, cpu);
 980		update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
 981	}
 982
 983
 984	return 0;
 985}
 986
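/* True once a core is seen whose L2-cache mask is wider than its sibling mask */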
 987static bool shared_caches;
 988
 989#ifdef CONFIG_SCHED_SMT
 990/* cpumask of CPUs with asymmetric SMT dependency */
 991static int powerpc_smt_flags(void)
 992{
 993	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 994
 995	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
 996		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
 997		flags |= SD_ASYM_PACKING;
 998	}
 999	return flags;
1000}
1001#endif
1002
1003/*
1004 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1005 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1006 * since the migrated task remains cache hot. We want to take advantage of this
1007 * at the scheduler level so an extra topology level is required.
1008 */
1009static int powerpc_shared_cache_flags(void)
1010{
1011	return SD_SHARE_PKG_RESOURCES;
1012}
1013
1014/*
1015 * We can't just pass cpu_l2_cache_mask() directly because
 1016 * it returns a non-const pointer and the compiler barfs on that.
1017 */
1018static const struct cpumask *shared_cache_mask(int cpu)
1019{
1020	return per_cpu(cpu_l2_cache_map, cpu);
1021}
1022
1023#ifdef CONFIG_SCHED_SMT
1024static const struct cpumask *smallcore_smt_mask(int cpu)
1025{
1026	return cpu_smallcore_mask(cpu);
1027}
1028#endif
1029
1030static struct cpumask *cpu_coregroup_mask(int cpu)
1031{
1032	return per_cpu(cpu_coregroup_map, cpu);
1033}
1034
1035static bool has_coregroup_support(void)
1036{
1037	return coregroup_enabled;
1038}
1039
1040static const struct cpumask *cpu_mc_mask(int cpu)
1041{
1042	return cpu_coregroup_mask(cpu);
1043}
1044
1045static struct sched_domain_topology_level powerpc_topology[] = {
1046#ifdef CONFIG_SCHED_SMT
1047	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1048#endif
1049	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
1050	{ cpu_mc_mask, SD_INIT_NAME(MC) },
1051	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1052	{ NULL, },
1053};
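/*
 * The order of the levels above must match the smt_idx/cache_idx/mc_idx/
 * die_idx enum so that fixup_topology() can patch entries in place.
 */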
1054
1055static int __init init_big_cores(void)
1056{
1057	int cpu;
1058
1059	for_each_possible_cpu(cpu) {
1060		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
1061
1062		if (err)
1063			return err;
1064
1065		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
1066					GFP_KERNEL,
1067					cpu_to_node(cpu));
1068	}
1069
1070	has_big_cores = true;
1071
1072	for_each_possible_cpu(cpu) {
1073		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);
1074
1075		if (err)
1076			return err;
1077	}
1078
1079	thread_group_shares_l2 = true;
1080	thread_group_shares_l3 = true;
1081	pr_debug("L2/L3 cache only shared by the threads in the small core\n");
1082
1083	return 0;
1084}
1085
1086void __init smp_prepare_cpus(unsigned int max_cpus)
1087{
1088	unsigned int cpu;
1089
1090	DBG("smp_prepare_cpus\n");
1091
1092	/* 
1093	 * setup_cpu may need to be called on the boot cpu. We haven't
 1094	 * spun any cpus up but let's be paranoid.
1095	 */
1096	BUG_ON(boot_cpuid != smp_processor_id());
1097
1098	/* Fixup boot cpu */
1099	smp_store_cpu_info(boot_cpuid);
1100	cpu_callin_map[boot_cpuid] = 1;
1101
1102	for_each_possible_cpu(cpu) {
1103		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
1104					GFP_KERNEL, cpu_to_node(cpu));
1105		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
1106					GFP_KERNEL, cpu_to_node(cpu));
1107		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
1108					GFP_KERNEL, cpu_to_node(cpu));
1109		if (has_coregroup_support())
1110			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
1111						GFP_KERNEL, cpu_to_node(cpu));
1112
1113#ifdef CONFIG_NUMA
1114		/*
1115		 * numa_node_id() works after this.
1116		 */
1117		if (cpu_present(cpu)) {
1118			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
1119			set_cpu_numa_mem(cpu,
1120				local_memory_node(numa_cpu_lookup_table[cpu]));
1121		}
1122#endif
1123	}
1124
1125	/* Init the cpumasks so the boot CPU is related to itself */
1126	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
1127	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
1128	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
1129
1130	if (has_coregroup_support())
1131		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
1132
1133	init_big_cores();
1134	if (has_big_cores) {
1135		cpumask_set_cpu(boot_cpuid,
1136				cpu_smallcore_mask(boot_cpuid));
1137	}
1138
1139	if (cpu_to_chip_id(boot_cpuid) != -1) {
1140		int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
1141
1142		/*
 1143		 * All threads of a core belong to the same core, so
 1144		 * chip_id_lookup_table will have one entry per core.
 1145		 * Assumption: if boot_cpuid doesn't have a chip-id, then no
 1146		 * other CPU will have one either.
1147		 */
1148		chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
1149		if (chip_id_lookup_table)
1150			memset(chip_id_lookup_table, -1, sizeof(int) * idx);
1151	}
1152
1153	if (smp_ops && smp_ops->probe)
1154		smp_ops->probe();
1155}
1156
1157void smp_prepare_boot_cpu(void)
1158{
1159	BUG_ON(smp_processor_id() != boot_cpuid);
1160#ifdef CONFIG_PPC64
1161	paca_ptrs[boot_cpuid]->__current = current;
1162#endif
1163	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
1164	current_set[boot_cpuid] = current;
1165}
1166
1167#ifdef CONFIG_HOTPLUG_CPU
1168
1169int generic_cpu_disable(void)
1170{
1171	unsigned int cpu = smp_processor_id();
1172
1173	if (cpu == boot_cpuid)
1174		return -EBUSY;
1175
1176	set_cpu_online(cpu, false);
1177#ifdef CONFIG_PPC64
1178	vdso_data->processorCount--;
1179#endif
1180	/* Update affinity of all IRQs previously aimed at this CPU */
1181	irq_migrate_all_off_this_cpu();
1182
1183	/*
1184	 * Depending on the details of the interrupt controller, it's possible
1185	 * that one of the interrupts we just migrated away from this CPU is
1186	 * actually already pending on this CPU. If we leave it in that state
1187	 * the interrupt will never be EOI'ed, and will never fire again. So
1188	 * temporarily enable interrupts here, to allow any pending interrupt to
1189	 * be received (and EOI'ed), before we take this CPU offline.
1190	 */
1191	local_irq_enable();
1192	mdelay(1);
1193	local_irq_disable();
1194
1195	return 0;
1196}
1197
1198void generic_cpu_die(unsigned int cpu)
1199{
1200	int i;
1201
1202	for (i = 0; i < 100; i++) {
1203		smp_rmb();
1204		if (is_cpu_dead(cpu))
1205			return;
1206		msleep(100);
1207	}
1208	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
1209}
1210
1211void generic_set_cpu_dead(unsigned int cpu)
1212{
1213	per_cpu(cpu_state, cpu) = CPU_DEAD;
1214}
1215
1216/*
1217 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
1218 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
1219 * which makes the delay in generic_cpu_die() not happen.
1220 */
1221void generic_set_cpu_up(unsigned int cpu)
1222{
1223	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
1224}
1225
1226int generic_check_cpu_restart(unsigned int cpu)
1227{
1228	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
1229}
1230
1231int is_cpu_dead(unsigned int cpu)
1232{
1233	return per_cpu(cpu_state, cpu) == CPU_DEAD;
1234}
1235
1236static bool secondaries_inhibited(void)
1237{
1238	return kvm_hv_mode_active();
1239}
1240
1241#else /* HOTPLUG_CPU */
1242
1243#define secondaries_inhibited()		0
1244
1245#endif
1246
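/* Point the secondary CPU's paca and current_set entry at its idle task */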
1247static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
1248{
1249#ifdef CONFIG_PPC64
1250	paca_ptrs[cpu]->__current = idle;
1251	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
1252				 THREAD_SIZE - STACK_FRAME_MIN_SIZE;
1253#endif
1254	task_thread_info(idle)->cpu = cpu;
1255	secondary_current = current_set[cpu] = idle;
1256}
1257
1258int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1259{
1260	const unsigned long boot_spin_ms = 5 * MSEC_PER_SEC;
1261	const bool booting = system_state < SYSTEM_RUNNING;
1262	const unsigned long hp_spin_ms = 1;
1263	unsigned long deadline;
1264	int rc;
1265	const unsigned long spin_wait_ms = booting ? boot_spin_ms : hp_spin_ms;
1266
1267	/*
1268	 * Don't allow secondary threads to come online if inhibited
1269	 */
1270	if (threads_per_core > 1 && secondaries_inhibited() &&
1271	    cpu_thread_in_subcore(cpu))
1272		return -EBUSY;
1273
1274	if (smp_ops == NULL ||
1275	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1276		return -EINVAL;
1277
1278	cpu_idle_thread_init(cpu, tidle);
1279
1280	/*
1281	 * The platform might need to allocate resources prior to bringing
1282	 * up the CPU
1283	 */
1284	if (smp_ops->prepare_cpu) {
1285		rc = smp_ops->prepare_cpu(cpu);
1286		if (rc)
1287			return rc;
1288	}
1289
 1290	/* Make sure callin-map entry is 0 (it can be left over from a
 1291	 * previous CPU hotplug)
1292	 */
1293	cpu_callin_map[cpu] = 0;
1294
1295	/* The information for processor bringup must
1296	 * be written out to main store before we release
1297	 * the processor.
1298	 */
1299	smp_mb();
1300
1301	/* wake up cpus */
1302	DBG("smp: kicking cpu %d\n", cpu);
1303	rc = smp_ops->kick_cpu(cpu);
1304	if (rc) {
1305		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1306		return rc;
1307	}
1308
1309	/*
1310	 * At boot time, simply spin on the callin word until the
1311	 * deadline passes.
1312	 *
1313	 * At run time, spin for an optimistic amount of time to avoid
1314	 * sleeping in the common case.
1315	 */
1316	deadline = jiffies + msecs_to_jiffies(spin_wait_ms);
1317	spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline));
1318
1319	if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) {
1320		const unsigned long sleep_interval_us = 10 * USEC_PER_MSEC;
1321		const unsigned long sleep_wait_ms = 100 * MSEC_PER_SEC;
1322
1323		deadline = jiffies + msecs_to_jiffies(sleep_wait_ms);
1324		while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline))
1325			fsleep(sleep_interval_us);
1326	}
1327
1328	if (!cpu_callin_map[cpu]) {
1329		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1330		return -ENOENT;
1331	}
1332
1333	DBG("Processor %u found.\n", cpu);
1334
1335	if (smp_ops->give_timebase)
1336		smp_ops->give_timebase();
1337
1338	/* Wait until cpu puts itself in the online & active maps */
1339	spin_until_cond(cpu_online(cpu));
1340
1341	return 0;
1342}
1343
1344/* Return the value of the reg property corresponding to the given
1345 * logical cpu.
1346 */
1347int cpu_to_core_id(int cpu)
1348{
1349	struct device_node *np;
1350	int id = -1;
1351
1352	np = of_get_cpu_node(cpu, NULL);
1353	if (!np)
1354		goto out;
1355
1356	id = of_get_cpu_hwid(np, 0);
1357out:
1358	of_node_put(np);
1359	return id;
1360}
1361EXPORT_SYMBOL_GPL(cpu_to_core_id);
1362
1363/* Helper routines for cpu to core mapping */
1364int cpu_core_index_of_thread(int cpu)
1365{
1366	return cpu >> threads_shift;
1367}
1368EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1369
1370int cpu_first_thread_of_core(int core)
1371{
1372	return core << threads_shift;
1373}
1374EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
1375
1376/* Must be called when no change can occur to cpu_present_mask,
1377 * i.e. during cpu online or offline.
1378 */
1379static struct device_node *cpu_to_l2cache(int cpu)
1380{
1381	struct device_node *np;
1382	struct device_node *cache;
1383
1384	if (!cpu_present(cpu))
1385		return NULL;
1386
1387	np = of_get_cpu_node(cpu, NULL);
1388	if (np == NULL)
1389		return NULL;
1390
1391	cache = of_find_next_cache_node(np);
1392
1393	of_node_put(np);
1394
1395	return cache;
1396}
1397
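/*
 * Build cpu_l2_cache_mask for @cpu: from the ibm,thread-groups data when
 * the threads of a core share L2, otherwise by matching the l2-cache
 * device-tree nodes of online CPUs. Returns false when no L2 information
 * is found and only core siblings are assumed to share the cache.
 */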
1398static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
1399{
1400	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1401	struct device_node *l2_cache, *np;
1402	int i;
1403
1404	if (has_big_cores)
1405		submask_fn = cpu_smallcore_mask;
1406
1407	/*
1408	 * If the threads in a thread-group share L2 cache, then the
1409	 * L2-mask can be obtained from thread_group_l2_cache_map.
1410	 */
1411	if (thread_group_shares_l2) {
1412		cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
1413
1414		for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
1415			if (cpu_online(i))
1416				set_cpus_related(i, cpu, cpu_l2_cache_mask);
1417		}
1418
1419		/* Verify that L1-cache siblings are a subset of L2 cache-siblings */
1420		if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
1421		    !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
1422			pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
1423				     cpu);
1424		}
1425
1426		return true;
1427	}
1428
1429	l2_cache = cpu_to_l2cache(cpu);
1430	if (!l2_cache || !*mask) {
1431		/* Assume only core siblings share cache with this CPU */
1432		for_each_cpu(i, cpu_sibling_mask(cpu))
1433			set_cpus_related(cpu, i, cpu_l2_cache_mask);
1434
1435		return false;
1436	}
1437
1438	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1439
1440	/* Update l2-cache mask with all the CPUs that are part of submask */
1441	or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
1442
1443	/* Skip all CPUs already part of current CPU l2-cache mask */
1444	cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
1445
1446	for_each_cpu(i, *mask) {
1447		/*
 1448		 * When updating the masks, the current CPU has not been marked
 1449		 * online yet, but we still need to update the cache masks.
1450		 */
1451		np = cpu_to_l2cache(i);
1452
1453		/* Skip all CPUs already part of current CPU l2-cache */
1454		if (np == l2_cache) {
1455			or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
1456			cpumask_andnot(*mask, *mask, submask_fn(i));
1457		} else {
1458			cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
1459		}
1460
1461		of_node_put(np);
1462	}
1463	of_node_put(l2_cache);
1464
1465	return true;
1466}
1467
1468#ifdef CONFIG_HOTPLUG_CPU
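/* Drop @cpu from every topology cpumask when it goes offline */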
1469static void remove_cpu_from_masks(int cpu)
1470{
1471	struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
1472	int i;
1473
1474	unmap_cpu_from_node(cpu);
1475
1476	if (shared_caches)
1477		mask_fn = cpu_l2_cache_mask;
1478
1479	for_each_cpu(i, mask_fn(cpu)) {
1480		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1481		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1482		if (has_big_cores)
1483			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1484	}
1485
1486	for_each_cpu(i, cpu_core_mask(cpu))
1487		set_cpus_unrelated(cpu, i, cpu_core_mask);
1488
1489	if (has_coregroup_support()) {
1490		for_each_cpu(i, cpu_coregroup_mask(cpu))
1491			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
1492	}
1493}
1494#endif
1495
1496static inline void add_cpu_to_smallcore_masks(int cpu)
1497{
1498	int i;
1499
1500	if (!has_big_cores)
1501		return;
1502
1503	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1504
1505	for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
1506		if (cpu_online(i))
1507			set_cpus_related(i, cpu, cpu_smallcore_mask);
1508	}
1509}
1510
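/* Build cpu_coregroup_mask for @cpu from online CPUs reporting the same coregroup id */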
1511static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
1512{
1513	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1514	int coregroup_id = cpu_to_coregroup_id(cpu);
1515	int i;
1516
1517	if (shared_caches)
1518		submask_fn = cpu_l2_cache_mask;
1519
1520	if (!*mask) {
1521		/* Assume only siblings are part of this CPU's coregroup */
1522		for_each_cpu(i, submask_fn(cpu))
1523			set_cpus_related(cpu, i, cpu_coregroup_mask);
1524
1525		return;
1526	}
1527
1528	cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1529
1530	/* Update coregroup mask with all the CPUs that are part of submask */
1531	or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
1532
1533	/* Skip all CPUs already part of coregroup mask */
1534	cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
1535
1536	for_each_cpu(i, *mask) {
1537		/* Skip all CPUs not part of this coregroup */
1538		if (coregroup_id == cpu_to_coregroup_id(i)) {
1539			or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
1540			cpumask_andnot(*mask, *mask, submask_fn(i));
1541		} else {
1542			cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
1543		}
1544	}
1545}
1546
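/* Rebuild the topology cpumasks that should contain @cpu as it comes online */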
1547static void add_cpu_to_masks(int cpu)
1548{
1549	struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1550	int first_thread = cpu_first_thread_sibling(cpu);
1551	cpumask_var_t mask;
1552	int chip_id = -1;
1553	bool ret;
1554	int i;
1555
1556	/*
1557	 * This CPU will not be in the online mask yet so we need to manually
 1558	 * add it to its own thread sibling mask.
1559	 */
1560	map_cpu_to_node(cpu, cpu_to_node(cpu));
1561	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1562	cpumask_set_cpu(cpu, cpu_core_mask(cpu));
1563
1564	for (i = first_thread; i < first_thread + threads_per_core; i++)
1565		if (cpu_online(i))
1566			set_cpus_related(i, cpu, cpu_sibling_mask);
1567
1568	add_cpu_to_smallcore_masks(cpu);
1569
1570	/* In CPU-hotplug path, hence use GFP_ATOMIC */
1571	ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
1572	update_mask_by_l2(cpu, &mask);
1573
1574	if (has_coregroup_support())
1575		update_coregroup_mask(cpu, &mask);
1576
1577	if (chip_id_lookup_table && ret)
1578		chip_id = cpu_to_chip_id(cpu);
1579
1580	if (shared_caches)
1581		submask_fn = cpu_l2_cache_mask;
1582
1583	/* Update core_mask with all the CPUs that are part of submask */
1584	or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
1585
1586	/* Skip all CPUs already part of current CPU core mask */
1587	cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
1588
 1589	/* If chip_id is -1, limit the cpu_core_mask to within the DIE */
1590	if (chip_id == -1)
1591		cpumask_and(mask, mask, cpu_cpu_mask(cpu));
1592
1593	for_each_cpu(i, mask) {
1594		if (chip_id == cpu_to_chip_id(i)) {
1595			or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
1596			cpumask_andnot(mask, mask, submask_fn(i));
1597		} else {
1598			cpumask_andnot(mask, mask, cpu_core_mask(i));
1599		}
1600	}
1601
1602	free_cpumask_var(mask);
1603}
1604
1605/* Activate a secondary processor. */
1606void start_secondary(void *unused)
1607{
1608	unsigned int cpu = raw_smp_processor_id();
1609
1610	/* PPC64 calls setup_kup() in early_setup_secondary() */
1611	if (IS_ENABLED(CONFIG_PPC32))
1612		setup_kup();
1613
1614	mmgrab(&init_mm);
1615	current->active_mm = &init_mm;
1616
1617	smp_store_cpu_info(cpu);
1618	set_dec(tb_ticks_per_jiffy);
1619	rcu_cpu_starting(cpu);
1620	cpu_callin_map[cpu] = 1;
1621
1622	if (smp_ops->setup_cpu)
1623		smp_ops->setup_cpu(cpu);
1624	if (smp_ops->take_timebase)
1625		smp_ops->take_timebase();
1626
1627	secondary_cpu_time_init();
1628
1629#ifdef CONFIG_PPC64
1630	if (system_state == SYSTEM_RUNNING)
1631		vdso_data->processorCount++;
1632
1633	vdso_getcpu_init();
1634#endif
1635	set_numa_node(numa_cpu_lookup_table[cpu]);
1636	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1637
1638	/* Update topology CPU masks */
1639	add_cpu_to_masks(cpu);
1640
1641	/*
1642	 * Check for any shared caches. Note that this must be done on a
1643	 * per-core basis because one core in the pair might be disabled.
1644	 */
1645	if (!shared_caches) {
1646		struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1647		struct cpumask *mask = cpu_l2_cache_mask(cpu);
1648
1649		if (has_big_cores)
1650			sibling_mask = cpu_smallcore_mask;
1651
1652		if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
1653			shared_caches = true;
1654	}
1655
1656	smp_wmb();
1657	notify_cpu_starting(cpu);
1658	set_cpu_online(cpu, true);
1659
1660	boot_init_stack_canary();
1661
1662	local_irq_enable();
1663
1664	/* We can enable ftrace for secondary cpus now */
1665	this_cpu_enable_ftrace();
1666
1667	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1668
1669	BUG();
1670}
1671
1672static void __init fixup_topology(void)
1673{
1674	int i;
1675
1676#ifdef CONFIG_SCHED_SMT
1677	if (has_big_cores) {
1678		pr_info("Big cores detected but using small core scheduling\n");
1679		powerpc_topology[smt_idx].mask = smallcore_smt_mask;
1680	}
1681#endif
1682
1683	if (!has_coregroup_support())
1684		powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
1685
1686	/*
1687	 * Try to consolidate topology levels here instead of
 1688	 * allowing the scheduler to degenerate them.
 1689	 * - Don't consolidate if masks are different.
 1690	 * - Don't consolidate if sd_flags exist and are different.
1691	 */
1692	for (i = 1; i <= die_idx; i++) {
1693		if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
1694			continue;
1695
1696		if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
1697				powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
1698			continue;
1699
1700		if (!powerpc_topology[i - 1].sd_flags)
1701			powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;
1702
1703		powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
1704		powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
1705#ifdef CONFIG_SCHED_DEBUG
1706		powerpc_topology[i].name = powerpc_topology[i + 1].name;
1707#endif
1708	}
1709}
1710
1711void __init smp_cpus_done(unsigned int max_cpus)
1712{
1713	/*
1714	 * We are running pinned to the boot CPU, see rest_init().
1715	 */
1716	if (smp_ops && smp_ops->setup_cpu)
1717		smp_ops->setup_cpu(boot_cpuid);
1718
1719	if (smp_ops && smp_ops->bringup_done)
1720		smp_ops->bringup_done();
1721
1722	dump_numa_cpu_topology();
1723
1724	fixup_topology();
1725	set_sched_topology(powerpc_topology);
1726}
1727
1728#ifdef CONFIG_HOTPLUG_CPU
1729int __cpu_disable(void)
1730{
1731	int cpu = smp_processor_id();
1732	int err;
1733
1734	if (!smp_ops->cpu_disable)
1735		return -ENOSYS;
1736
1737	this_cpu_disable_ftrace();
1738
1739	err = smp_ops->cpu_disable();
1740	if (err)
1741		return err;
1742
1743	/* Update sibling maps */
1744	remove_cpu_from_masks(cpu);
1745
1746	return 0;
1747}
1748
1749void __cpu_die(unsigned int cpu)
1750{
1751	if (smp_ops->cpu_die)
1752		smp_ops->cpu_die(cpu);
1753}
1754
1755void arch_cpu_idle_dead(void)
1756{
1757	/*
1758	 * Disable on the down path. This will be re-enabled by
1759	 * start_secondary() via start_secondary_resume() below
1760	 */
1761	this_cpu_disable_ftrace();
1762
1763	if (smp_ops->cpu_offline_self)
1764		smp_ops->cpu_offline_self();
1765
1766	/* If we return, we re-enter start_secondary */
1767	start_secondary_resume();
1768}
1769
1770#endif
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * SMP support for ppc.
   4 *
   5 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
   6 * deal of code from the sparc and intel versions.
   7 *
   8 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
   9 *
  10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
  11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
  12 */
  13
  14#undef DEBUG
  15
  16#include <linux/kernel.h>
  17#include <linux/export.h>
  18#include <linux/sched/mm.h>
  19#include <linux/sched/task_stack.h>
  20#include <linux/sched/topology.h>
  21#include <linux/smp.h>
  22#include <linux/interrupt.h>
  23#include <linux/delay.h>
  24#include <linux/init.h>
  25#include <linux/spinlock.h>
  26#include <linux/cache.h>
  27#include <linux/err.h>
  28#include <linux/device.h>
  29#include <linux/cpu.h>
  30#include <linux/notifier.h>
  31#include <linux/topology.h>
  32#include <linux/profile.h>
  33#include <linux/processor.h>
  34#include <linux/random.h>
  35#include <linux/stackprotector.h>
 
 
 
  36
  37#include <asm/ptrace.h>
  38#include <linux/atomic.h>
  39#include <asm/irq.h>
  40#include <asm/hw_irq.h>
  41#include <asm/kvm_ppc.h>
  42#include <asm/dbell.h>
  43#include <asm/page.h>
  44#include <asm/pgtable.h>
  45#include <asm/prom.h>
  46#include <asm/smp.h>
  47#include <asm/time.h>
  48#include <asm/machdep.h>
  49#include <asm/cputhreads.h>
  50#include <asm/cputable.h>
  51#include <asm/mpic.h>
  52#include <asm/vdso_datapage.h>
  53#ifdef CONFIG_PPC64
  54#include <asm/paca.h>
  55#endif
  56#include <asm/vdso.h>
  57#include <asm/debug.h>
  58#include <asm/kexec.h>
  59#include <asm/asm-prototypes.h>
  60#include <asm/cpu_has_feature.h>
  61#include <asm/ftrace.h>
 
 
  62
  63#ifdef DEBUG
  64#include <asm/udbg.h>
  65#define DBG(fmt...) udbg_printf(fmt)
  66#else
  67#define DBG(fmt...)
  68#endif
  69
  70#ifdef CONFIG_HOTPLUG_CPU
  71/* State of each CPU during hotplug phases */
  72static DEFINE_PER_CPU(int, cpu_state) = { 0 };
  73#endif
  74
  75struct task_struct *secondary_current;
  76bool has_big_cores;
 
 
 
  77
  78DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
  79DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
  80DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
  81DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 
  82
  83EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  84EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
  85EXPORT_PER_CPU_SYMBOL(cpu_core_map);
  86EXPORT_SYMBOL_GPL(has_big_cores);
  87
 
 
 
 
 
 
 
 
 
  88#define MAX_THREAD_LIST_SIZE	8
  89#define THREAD_GROUP_SHARE_L1   1
 
  90struct thread_groups {
  91	unsigned int property;
  92	unsigned int nr_groups;
  93	unsigned int threads_per_group;
  94	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
  95};
  96
 
 
 
 
 
 
 
 
 
  97/*
  98 * On big-cores system, cpu_l1_cache_map for each CPU corresponds to
  99 * the set its siblings that share the L1-cache.
 100 */
 101DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);
 
 
 
 
 
 
 
 
 
 
 
 
 
 102
 103/* SMP operations for this machine */
 104struct smp_ops_t *smp_ops;
 105
 106/* Can't be static due to PowerMac hackery */
 107volatile unsigned int cpu_callin_map[NR_CPUS];
 108
 109int smt_enabled_at_boot = 1;
 110
 111/*
 112 * Returns 1 if the specified cpu should be brought up during boot.
 113 * Used to inhibit booting threads if they've been disabled or
 114 * limited on the command line
 115 */
 116int smp_generic_cpu_bootable(unsigned int nr)
 117{
 118	/* Special case - we inhibit secondary thread startup
 119	 * during boot if the user requests it.
 120	 */
 121	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
 122		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
 123			return 0;
 124		if (smt_enabled_at_boot
 125		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
 126			return 0;
 127	}
 128
 129	return 1;
 130}
 131
 132
 133#ifdef CONFIG_PPC64
 134int smp_generic_kick_cpu(int nr)
 135{
 136	if (nr < 0 || nr >= nr_cpu_ids)
 137		return -EINVAL;
 138
 139	/*
 140	 * The processor is currently spinning, waiting for the
 141	 * cpu_start field to become non-zero After we set cpu_start,
 142	 * the processor will continue on to secondary_start
 143	 */
 144	if (!paca_ptrs[nr]->cpu_start) {
 145		paca_ptrs[nr]->cpu_start = 1;
 146		smp_mb();
 147		return 0;
 148	}
 149
 150#ifdef CONFIG_HOTPLUG_CPU
 151	/*
 152	 * Ok it's not there, so it might be soft-unplugged, let's
 153	 * try to bring it back
 154	 */
 155	generic_set_cpu_up(nr);
 156	smp_wmb();
 157	smp_send_reschedule(nr);
 158#endif /* CONFIG_HOTPLUG_CPU */
 159
 160	return 0;
 161}
 162#endif /* CONFIG_PPC64 */
 163
 164static irqreturn_t call_function_action(int irq, void *data)
 165{
 166	generic_smp_call_function_interrupt();
 167	return IRQ_HANDLED;
 168}
 169
 170static irqreturn_t reschedule_action(int irq, void *data)
 171{
 172	scheduler_ipi();
 173	return IRQ_HANDLED;
 174}
 175
 176#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 177static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
 178{
 179	timer_broadcast_interrupt();
 180	return IRQ_HANDLED;
 181}
 182#endif
 183
 184#ifdef CONFIG_NMI_IPI
 185static irqreturn_t nmi_ipi_action(int irq, void *data)
 186{
 187	smp_handle_nmi_ipi(get_irq_regs());
 188	return IRQ_HANDLED;
 189}
 190#endif
 191
 192static irq_handler_t smp_ipi_action[] = {
 193	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
 194	[PPC_MSG_RESCHEDULE] = reschedule_action,
 195#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 196	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
 197#endif
 198#ifdef CONFIG_NMI_IPI
 199	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
 200#endif
 201};
 202
 203/*
 204 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 205 * than going through the call function infrastructure, and strongly
 206 * serialized, so it is more appropriate for debugging.
 207 */
 208const char *smp_ipi_name[] = {
 209	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
 210	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
 211#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 212	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
 213#endif
 214#ifdef CONFIG_NMI_IPI
 215	[PPC_MSG_NMI_IPI] = "nmi ipi",
 216#endif
 217};
 218
 219/* optional function to request ipi, for controllers with >= 4 ipis */
 220int smp_request_message_ipi(int virq, int msg)
 221{
 222	int err;
 223
 224	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
 225		return -EINVAL;
 226#ifndef CONFIG_NMI_IPI
 227	if (msg == PPC_MSG_NMI_IPI)
 228		return 1;
 229#endif
 230
 231	err = request_irq(virq, smp_ipi_action[msg],
 232			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
 233			  smp_ipi_name[msg], NULL);
 234	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
 235		virq, smp_ipi_name[msg], err);
 236
 237	return err;
 238}
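
    /*
     * Illustrative sketch (the virq names are hypothetical): a platform
     * whose interrupt controller provides a separate interrupt per message
     * can wire each one up from its setup code, e.g.
     *
     *	smp_request_message_ipi(virq_call_func, PPC_MSG_CALL_FUNCTION);
     *	smp_request_message_ipi(virq_resched, PPC_MSG_RESCHEDULE);
     *
     * Controllers with a single IPI instead implement smp_ops->cause_ipi
     * and use the muxed message path below.
     */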
 239
 240#ifdef CONFIG_PPC_SMP_MUXED_IPI
 241struct cpu_messages {
 242	long messages;			/* current messages */
 243};
 244static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
 245
 246void smp_muxed_ipi_set_message(int cpu, int msg)
 247{
 248	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 249	char *message = (char *)&info->messages;
 250
 251	/*
 252	 * Order previous accesses before accesses in the IPI handler.
 253	 */
 254	smp_mb();
 255	message[msg] = 1;
 256}
 257
 258void smp_muxed_ipi_message_pass(int cpu, int msg)
 259{
 260	smp_muxed_ipi_set_message(cpu, msg);
 261
 262	/*
 263	 * cause_ipi functions are required to include a full barrier
 264	 * before doing whatever causes the IPI.
 265	 */
 266	smp_ops->cause_ipi(cpu);
 267}
 268
 269#ifdef __BIG_ENDIAN__
 270#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
 271#else
 272#define IPI_MESSAGE(A) (1uL << (8 * (A)))
 273#endif
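
    /*
     * How the two definitions line up: smp_muxed_ipi_set_message() stores
     * a 1 into byte 'msg' of the per-cpu messages word, and IPI_MESSAGE(msg)
     * is the value of that word when only this byte is set.  For example,
     * on 64-bit big-endian PPC_MSG_CALL_FUNCTION (message 0) lands in the
     * most significant byte, i.e. 1UL << 56, while on little-endian the
     * same message is 1UL << 0.
     */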
 274
 275irqreturn_t smp_ipi_demux(void)
 276{
 277	mb();	/* order any irq clear */
 278
 279	return smp_ipi_demux_relaxed();
 280}
 281
 282/* sync-free variant. Callers should ensure synchronization */
 283irqreturn_t smp_ipi_demux_relaxed(void)
 284{
 285	struct cpu_messages *info;
 286	unsigned long all;
 287
 288	info = this_cpu_ptr(&ipi_message);
 289	do {
 290		all = xchg(&info->messages, 0);
 291#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
 292		/*
 293		 * Must check for PPC_MSG_RM_HOST_ACTION messages
 294		 * before PPC_MSG_CALL_FUNCTION messages because when
 295		 * a VM is destroyed, we call kick_all_cpus_sync()
 296		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
 297		 * messages have completed before we free any VCPUs.
 298		 */
 299		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
 300			kvmppc_xics_ipi_action();
 301#endif
 302		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
 303			generic_smp_call_function_interrupt();
 304		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
 305			scheduler_ipi();
 306#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 307		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
 308			timer_broadcast_interrupt();
 309#endif
 310#ifdef CONFIG_NMI_IPI
 311		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
 312			nmi_ipi_action(0, NULL);
 313#endif
 314	} while (info->messages);
 315
 316	return IRQ_HANDLED;
 317}
 318#endif /* CONFIG_PPC_SMP_MUXED_IPI */
 319
 320static inline void do_message_pass(int cpu, int msg)
 321{
 322	if (smp_ops->message_pass)
 323		smp_ops->message_pass(cpu, msg);
 324#ifdef CONFIG_PPC_SMP_MUXED_IPI
 325	else
 326		smp_muxed_ipi_message_pass(cpu, msg);
 327#endif
 328}
 329
 330void smp_send_reschedule(int cpu)
 331{
 332	if (likely(smp_ops))
 333		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 334}
 335EXPORT_SYMBOL_GPL(smp_send_reschedule);
 336
 337void arch_send_call_function_single_ipi(int cpu)
 338{
 339	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 340}
 341
 342void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 343{
 344	unsigned int cpu;
 345
 346	for_each_cpu(cpu, mask)
 347		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 348}
 349
 350#ifdef CONFIG_NMI_IPI
 351
 352/*
 353 * "NMI IPI" system.
 354 *
 355 * NMI IPIs may not be recoverable, so should not be used as an ongoing part of
 356 * a running system. They can be used for crash, debug, halt/reboot, etc.
 357 *
 358 * The IPI call waits with interrupts disabled until all targets enter the
 359 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 360 * have returned from their handlers, so there is no guarantee about
 361 * concurrency or re-entrancy.
 362 *
 363 * A new NMI can be issued before all targets exit the handler.
 364 *
 365 * The IPI call may time out without all targets entering the NMI handler.
 366 * In that case, there is some logic to recover (and ignore subsequent
 367 * NMI interrupts that may eventually be raised), but the platform interrupt
 368 * handler may not be able to distinguish this from other exception causes,
 369 * which may cause a crash.
 370 */
 371
 372static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
 373static struct cpumask nmi_ipi_pending_mask;
 374static bool nmi_ipi_busy = false;
 375static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
 376
 377static void nmi_ipi_lock_start(unsigned long *flags)
 378{
 379	raw_local_irq_save(*flags);
 380	hard_irq_disable();
 381	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 382		raw_local_irq_restore(*flags);
 383		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 384		raw_local_irq_save(*flags);
 385		hard_irq_disable();
 386	}
 387}
 388
 389static void nmi_ipi_lock(void)
 390{
 391	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
 392		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 393}
 394
 395static void nmi_ipi_unlock(void)
 396{
 397	smp_mb();
 398	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
 399	atomic_set(&__nmi_ipi_lock, 0);
 400}
 401
 402static void nmi_ipi_unlock_end(unsigned long *flags)
 403{
 404	nmi_ipi_unlock();
 405	raw_local_irq_restore(*flags);
 406}
 407
 408/*
 409 * Platform NMI handler calls this to ack
 410 */
 411int smp_handle_nmi_ipi(struct pt_regs *regs)
 412{
 413	void (*fn)(struct pt_regs *) = NULL;
 414	unsigned long flags;
 415	int me = raw_smp_processor_id();
 416	int ret = 0;
 417
 418	/*
 419	 * Unexpected NMIs are possible here because the interrupt may not
 420	 * be able to distinguish NMI IPIs from other types of NMIs, or
 421	 * because the caller may have timed out.
 422	 */
 423	nmi_ipi_lock_start(&flags);
 424	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
 425		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 426		fn = READ_ONCE(nmi_ipi_function);
 427		WARN_ON_ONCE(!fn);
 428		ret = 1;
 429	}
 430	nmi_ipi_unlock_end(&flags);
 431
 432	if (fn)
 433		fn(regs);
 434
 435	return ret;
 436}
 437
 438static void do_smp_send_nmi_ipi(int cpu, bool safe)
 439{
 440	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
 441		return;
 442
 443	if (cpu >= 0) {
 444		do_message_pass(cpu, PPC_MSG_NMI_IPI);
 445	} else {
 446		int c;
 447
 448		for_each_online_cpu(c) {
 449			if (c == raw_smp_processor_id())
 450				continue;
 451			do_message_pass(c, PPC_MSG_NMI_IPI);
 452		}
 453	}
 454}
 455
 456/*
 457 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 458 * - fn is the target callback function.
 459 * - delay_us > 0 is the delay before giving up waiting for targets to
 460 *   begin executing the handler, == 0 specifies indefinite delay.
 461 */
 462static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
 463				u64 delay_us, bool safe)
 464{
 465	unsigned long flags;
 466	int me = raw_smp_processor_id();
 467	int ret = 1;
 468
 469	BUG_ON(cpu == me);
 470	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
 471
 472	if (unlikely(!smp_ops))
 473		return 0;
 474
 475	nmi_ipi_lock_start(&flags);
 476	while (nmi_ipi_busy) {
 477		nmi_ipi_unlock_end(&flags);
 478		spin_until_cond(!nmi_ipi_busy);
 479		nmi_ipi_lock_start(&flags);
 480	}
 481	nmi_ipi_busy = true;
 482	nmi_ipi_function = fn;
 483
 484	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
 485
 486	if (cpu < 0) {
 487		/* ALL_OTHERS */
 488		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
 489		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 490	} else {
 491		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
 492	}
 493
 494	nmi_ipi_unlock();
 495
 496	/* Interrupts remain hard disabled */
 497
 498	do_smp_send_nmi_ipi(cpu, safe);
 499
 500	nmi_ipi_lock();
 501	/* nmi_ipi_busy is set here, so unlock/lock is okay */
 502	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
 503		nmi_ipi_unlock();
 504		udelay(1);
 505		nmi_ipi_lock();
 506		if (delay_us) {
 507			delay_us--;
 508			if (!delay_us)
 509				break;
 510		}
 511	}
 512
 513	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
 514		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
 515		ret = 0;
 516		cpumask_clear(&nmi_ipi_pending_mask);
 517	}
 518
 519	nmi_ipi_function = NULL;
 520	nmi_ipi_busy = false;
 521
 522	nmi_ipi_unlock_end(&flags);
 523
 524	return ret;
 525}
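
    /*
     * The wait loop above polls in 1us steps, so delay_us is effectively a
     * timeout in microseconds.  For example, smp_send_debugger_break()
     * below passes 1000000, allowing targets up to one second to enter the
     * NMI handler before the IPI is given up on.
     */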
 526
 527int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 528{
 529	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
 530}
 531
 532int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 533{
 534	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
 535}
 536#endif /* CONFIG_NMI_IPI */
 537
 538#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 539void tick_broadcast(const struct cpumask *mask)
 540{
 541	unsigned int cpu;
 542
 543	for_each_cpu(cpu, mask)
 544		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
 545}
 546#endif
 547
 548#ifdef CONFIG_DEBUGGER
 549void debugger_ipi_callback(struct pt_regs *regs)
 550{
 551	debugger_ipi(regs);
 552}
 553
 554void smp_send_debugger_break(void)
 555{
 556	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
 557}
 558#endif
 559
 560#ifdef CONFIG_KEXEC_CORE
 561void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 562{
 563	int cpu;
 564
 565	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
 566	if (kdump_in_progress() && crash_wake_offline) {
 567		for_each_present_cpu(cpu) {
 568			if (cpu_online(cpu))
 569				continue;
 570			/*
 571			 * crash_ipi_callback will wait for
 572			 * all cpus, including offline CPUs.
 573			 * We don't care about nmi_ipi_function.
 574			 * Offline cpus will jump straight into
 575			 * crash_ipi_callback, so we can skip the
 576			 * entire NMI dance and waiting for
 577			 * cpus to clear pending mask, etc.
 578			 */
 579			do_smp_send_nmi_ipi(cpu, false);
 580		}
 581	}
 582}
 583#endif
 584
 585#ifdef CONFIG_NMI_IPI
 586static void nmi_stop_this_cpu(struct pt_regs *regs)
 587{
 588	/*
 589	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
 590	 */
 591	spin_begin();
 592	while (1)
 593		spin_cpu_relax();
 594}
 595
 596void smp_send_stop(void)
 597{
 598	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
 599}
 600
 601#else /* CONFIG_NMI_IPI */
 602
 603static void stop_this_cpu(void *dummy)
 604{
 605	hard_irq_disable();
 606	spin_begin();
 607	while (1)
 608		spin_cpu_relax();
 609}
 610
 611void smp_send_stop(void)
 612{
 613	static bool stopped = false;
 614
 615	/*
 616	 * Prevent waiting on csd lock from a previous smp_send_stop.
 617	 * This is racy, but in general callers try to do the right
 618	 * thing and only fire off one smp_send_stop (e.g., see
 619	 * kernel/panic.c)
 620	 */
 621	if (stopped)
 622		return;
 623
 624	stopped = true;
 625
 626	smp_call_function(stop_this_cpu, NULL, 0);
 627}
 628#endif /* CONFIG_NMI_IPI */
 629
 630struct task_struct *current_set[NR_CPUS];
 631
 632static void smp_store_cpu_info(int id)
 633{
 634	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 635#ifdef CONFIG_PPC_FSL_BOOK3E
 636	per_cpu(next_tlbcam_idx, id)
 637		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
 638#endif
 639}
 640
 641/*
 642 * Relationships between CPUs are maintained in a set of per-cpu cpumasks so
 643 * rather than just passing around the cpumask, we pass around a function that
 644 * returns that cpumask for the given CPU.
 645 */
 646static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
 647{
 648	cpumask_set_cpu(i, get_cpumask(j));
 649	cpumask_set_cpu(j, get_cpumask(i));
 650}
 651
 652#ifdef CONFIG_HOTPLUG_CPU
 653static void set_cpus_unrelated(int i, int j,
 654		struct cpumask *(*get_cpumask)(int))
 655{
 656	cpumask_clear_cpu(i, get_cpumask(j));
 657	cpumask_clear_cpu(j, get_cpumask(i));
 658}
 659#endif
 660
 661/*
 662 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 663 *                      property for the CPU device node @dn and stores
 664 *                      the parsed output in the thread_groups
 665 *                      structure @tg if the ibm,thread-groups[0]
 666 *                      matches @property.
 667 *
 668 * @dn: The device node of the CPU device.
 669 * @tg: Pointer to a thread group structure into which the parsed
 670 *      output of "ibm,thread-groups" is stored.
 671 * @property: The property of the thread-group that the caller is
 672 *            interested in.
 673 *
 674 * ibm,thread-groups[0..N-1] array defines which group of threads in
 675 * the CPU-device node can be grouped together based on the property.
 676 *
 677 * ibm,thread-groups[0] tells us the property based on which the
 678 * threads are being grouped together. If this value is 1, it implies
 679 * that the threads in the same group share the L1 and translation cache.
 680 *
 681 * ibm,thread-groups[1] tells us how many such thread groups exist.
 682 *
 683 * ibm,thread-groups[2] tells us the number of threads in each such
 684 * group.
 685 *
 686 * ibm,thread-groups[3..N-1] is the list of threads identified by
 687 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 688 * the grouping.
 689 *
 690 * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
 691 * implies that there are 2 groups of 4 threads each, where each group
 692 * of threads shares the L1 and translation cache.
 693 *
 694 * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
 695 * and the "ibm,ppc-interrupt-server#s" of the second group is {9, 10,
 696 * 11, 12}.
 697 *
 698 * Returns 0 on success, -EINVAL if the property does not exist,
 699 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 700 * property data isn't large enough.
 701 */
 702static int parse_thread_groups(struct device_node *dn,
 703			       struct thread_groups *tg,
 704			       unsigned int property)
 705{
 706	int i;
 707	u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
 708	u32 *thread_list;
 709	size_t total_threads;
 710	int ret;
 711
 712	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
 713					 thread_group_array, 3);
 714	if (ret)
 715		return ret;
 716
 717	tg->property = thread_group_array[0];
 718	tg->nr_groups = thread_group_array[1];
 719	tg->threads_per_group = thread_group_array[2];
 720	if (tg->property != property ||
 721	    tg->nr_groups < 1 ||
 722	    tg->threads_per_group < 1)
 723		return -ENODATA;
 724
 725	total_threads = tg->nr_groups * tg->threads_per_group;
 726
 727	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
 728					 thread_group_array,
 729					 3 + total_threads);
 730	if (ret)
 731		return ret;
 732
 733	thread_list = &thread_group_array[3];
 734
 735	for (i = 0 ; i < total_threads; i++)
 736		tg->thread_list[i] = thread_list[i];
 737
 738	return 0;
 739}
 740
 741/*
 742 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 743 *                              that @cpu belongs to.
 744 *
 745 * @cpu : The logical CPU whose thread group is being searched.
 746 * @tg : The thread-group structure of the CPU node which @cpu belongs
 747 *       to.
 748 *
 749 * Returns the index to tg->thread_list that points to the start
 750 * of the thread_group that @cpu belongs to.
 751 *
 752 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 753 * tg->thread_list.
 754 */
 755static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
 756{
 757	int hw_cpu_id = get_hard_smp_processor_id(cpu);
 758	int i, j;
 759
 760	for (i = 0; i < tg->nr_groups; i++) {
 761		int group_start = i * tg->threads_per_group;
 762
 763		for (j = 0; j < tg->threads_per_group; j++) {
 764			int idx = group_start + j;
 765
 766			if (tg->thread_list[idx] == hw_cpu_id)
 767				return group_start;
 768		}
 769	}
 770
 771	return -1;
 772}
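
    /*
     * Worked example, reusing the device tree values from the
     * parse_thread_groups() comment above: thread_list is
     * {5,6,7,8,9,10,11,12} with threads_per_group == 4.  A CPU whose
     * "ibm,ppc-interrupt-server#s" is 10 matches index 5, so the function
     * returns 4, the start of the second group.
     */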
 773
 774static int init_cpu_l1_cache_map(int cpu)
 775
 776{
 777	struct device_node *dn = of_get_cpu_node(cpu, NULL);
 778	struct thread_groups tg = {.property = 0,
 779				   .nr_groups = 0,
 780				   .threads_per_group = 0};
 781	int first_thread = cpu_first_thread_sibling(cpu);
 782	int i, cpu_group_start = -1, err = 0;
 783
 784	if (!dn)
 785		return -ENODATA;
 786
 787	err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
 788	if (err)
 789		goto out;
 790
 791	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
 792				GFP_KERNEL,
 793				cpu_to_node(cpu));
 794
 795	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
 796
 797	if (unlikely(cpu_group_start == -1)) {
 798		WARN_ON_ONCE(1);
 799		err = -ENODATA;
 800		goto out;
 801	}
 802
 803	for (i = first_thread; i < first_thread + threads_per_core; i++) {
 804		int i_group_start = get_cpu_thread_group_start(i, &tg);
 805
 806		if (unlikely(i_group_start == -1)) {
 807			WARN_ON_ONCE(1);
 808			err = -ENODATA;
 809			goto out;
 810		}
 811
 812		if (i_group_start == cpu_group_start)
 813			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
 814	}
 815
 816out:
 817	of_node_put(dn);
 818	return err;
 819}
 820
 821static int init_big_cores(void)
 822{
 823	int cpu;
 824
 825	for_each_possible_cpu(cpu) {
 826		int err = init_cpu_l1_cache_map(cpu);
 827
 828		if (err)
 829			return err;
 830
 831		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
 832					GFP_KERNEL,
 833					cpu_to_node(cpu));
 834	}
 835
 836	has_big_cores = true;
 837	return 0;
 838}
 839
 840void __init smp_prepare_cpus(unsigned int max_cpus)
 841{
 842	unsigned int cpu;
 843
 844	DBG("smp_prepare_cpus\n");
 845
 846	/* 
 847	 * setup_cpu may need to be called on the boot cpu. We haven't
 848	 * spun any cpus up, but let's be paranoid.
 849	 */
 850	BUG_ON(boot_cpuid != smp_processor_id());
 851
 852	/* Fixup boot cpu */
 853	smp_store_cpu_info(boot_cpuid);
 854	cpu_callin_map[boot_cpuid] = 1;
 855
 856	for_each_possible_cpu(cpu) {
 857		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
 858					GFP_KERNEL, cpu_to_node(cpu));
 859		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
 860					GFP_KERNEL, cpu_to_node(cpu));
 861		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
 862					GFP_KERNEL, cpu_to_node(cpu));
 863		/*
 864		 * numa_node_id() works after this.
 865		 */
 866		if (cpu_present(cpu)) {
 867			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
 868			set_cpu_numa_mem(cpu,
 869				local_memory_node(numa_cpu_lookup_table[cpu]));
 870		}
 871	}
 872
 873	/* Init the cpumasks so the boot CPU is related to itself */
 874	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
 875	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
 876	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
 877
 878	init_big_cores();
 879	if (has_big_cores) {
 880		cpumask_set_cpu(boot_cpuid,
 881				cpu_smallcore_mask(boot_cpuid));
 882	}
 883
 884	if (smp_ops && smp_ops->probe)
 885		smp_ops->probe();
 886}
 887
 888void smp_prepare_boot_cpu(void)
 889{
 890	BUG_ON(smp_processor_id() != boot_cpuid);
 891#ifdef CONFIG_PPC64
 892	paca_ptrs[boot_cpuid]->__current = current;
 893#endif
 894	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
 895	current_set[boot_cpuid] = current;
 896}
 897
 898#ifdef CONFIG_HOTPLUG_CPU
 899
 900int generic_cpu_disable(void)
 901{
 902	unsigned int cpu = smp_processor_id();
 903
 904	if (cpu == boot_cpuid)
 905		return -EBUSY;
 906
 907	set_cpu_online(cpu, false);
 908#ifdef CONFIG_PPC64
 909	vdso_data->processorCount--;
 910#endif
 911	/* Update affinity of all IRQs previously aimed at this CPU */
 912	irq_migrate_all_off_this_cpu();
 913
 914	/*
 915	 * Depending on the details of the interrupt controller, it's possible
 916	 * that one of the interrupts we just migrated away from this CPU is
 917	 * actually already pending on this CPU. If we leave it in that state
 918	 * the interrupt will never be EOI'ed, and will never fire again. So
 919	 * temporarily enable interrupts here, to allow any pending interrupt to
 920	 * be received (and EOI'ed), before we take this CPU offline.
 921	 */
 922	local_irq_enable();
 923	mdelay(1);
 924	local_irq_disable();
 925
 926	return 0;
 927}
 928
 929void generic_cpu_die(unsigned int cpu)
 930{
 931	int i;
 932
 933	for (i = 0; i < 100; i++) {
 934		smp_rmb();
 935		if (is_cpu_dead(cpu))
 936			return;
 937		msleep(100);
 938	}
 939	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
 940}
 941
 942void generic_set_cpu_dead(unsigned int cpu)
 943{
 944	per_cpu(cpu_state, cpu) = CPU_DEAD;
 945}
 946
 947/*
 948 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 949 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 950 * which makes the delay in generic_cpu_die() not happen.
 951 */
 952void generic_set_cpu_up(unsigned int cpu)
 953{
 954	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 955}
 956
 957int generic_check_cpu_restart(unsigned int cpu)
 958{
 959	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
 960}
 961
 962int is_cpu_dead(unsigned int cpu)
 963{
 964	return per_cpu(cpu_state, cpu) == CPU_DEAD;
 965}
 966
 967static bool secondaries_inhibited(void)
 968{
 969	return kvm_hv_mode_active();
 970}
 971
 972#else /* HOTPLUG_CPU */
 973
 974#define secondaries_inhibited()		0
 975
 976#endif
 977
 978static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
 979{
 980#ifdef CONFIG_PPC64
 981	paca_ptrs[cpu]->__current = idle;
 982	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
 983				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
 984#endif
 985	idle->cpu = cpu;
 986	secondary_current = current_set[cpu] = idle;
 987}
 988
 989int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 990{
 991	int rc, c;
 992
 993	/*
 994	 * Don't allow secondary threads to come online if inhibited
 995	 */
 996	if (threads_per_core > 1 && secondaries_inhibited() &&
 997	    cpu_thread_in_subcore(cpu))
 998		return -EBUSY;
 999
1000	if (smp_ops == NULL ||
1001	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1002		return -EINVAL;
1003
1004	cpu_idle_thread_init(cpu, tidle);
1005
1006	/*
1007	 * The platform might need to allocate resources prior to bringing
1008	 * up the CPU
1009	 */
1010	if (smp_ops->prepare_cpu) {
1011		rc = smp_ops->prepare_cpu(cpu);
1012		if (rc)
1013			return rc;
1014	}
1015
1016	/* Make sure callin-map entry is 0 (can be left over from a
1017	 * CPU hotplug)
1018	 */
1019	cpu_callin_map[cpu] = 0;
1020
1021	/* The information for processor bringup must
1022	 * be written out to main store before we release
1023	 * the processor.
1024	 */
1025	smp_mb();
1026
1027	/* wake up cpus */
1028	DBG("smp: kicking cpu %d\n", cpu);
1029	rc = smp_ops->kick_cpu(cpu);
1030	if (rc) {
1031		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1032		return rc;
1033	}
1034
1035	/*
1036	 * wait to see if the cpu made a callin (is actually up).
1037	 * use this value that I found through experimentation.
1038	 * -- Cort
1039	 */
1040	if (system_state < SYSTEM_RUNNING)
1041		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
1042			udelay(100);
1043#ifdef CONFIG_HOTPLUG_CPU
1044	else
1045		/*
1046		 * CPUs can take much longer to come up in the
1047		 * hotplug case.  Wait five seconds.
1048		 */
1049		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
1050			msleep(1);
1051#endif
1052
1053	if (!cpu_callin_map[cpu]) {
1054		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1055		return -ENOENT;
1056	}
1057
1058	DBG("Processor %u found.\n", cpu);
1059
1060	if (smp_ops->give_timebase)
1061		smp_ops->give_timebase();
1062
1063	/* Wait until cpu puts itself in the online & active maps */
1064	spin_until_cond(cpu_online(cpu));
1065
1066	return 0;
1067}
1068
1069/* Return the value of the reg property corresponding to the given
1070 * logical cpu.
1071 */
1072int cpu_to_core_id(int cpu)
1073{
1074	struct device_node *np;
1075	const __be32 *reg;
1076	int id = -1;
1077
1078	np = of_get_cpu_node(cpu, NULL);
1079	if (!np)
1080		goto out;
1081
1082	reg = of_get_property(np, "reg", NULL);
1083	if (!reg)
1084		goto out;
1085
1086	id = be32_to_cpup(reg);
1087out:
1088	of_node_put(np);
1089	return id;
1090}
1091EXPORT_SYMBOL_GPL(cpu_to_core_id);
1092
1093/* Helper routines for cpu to core mapping */
1094int cpu_core_index_of_thread(int cpu)
1095{
1096	return cpu >> threads_shift;
1097}
1098EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1099
1100int cpu_first_thread_of_core(int core)
1101{
1102	return core << threads_shift;
1103}
1104EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
1105
1106/* Must be called when no change can occur to cpu_present_mask,
1107 * i.e. during cpu online or offline.
1108 */
1109static struct device_node *cpu_to_l2cache(int cpu)
1110{
1111	struct device_node *np;
1112	struct device_node *cache;
1113
1114	if (!cpu_present(cpu))
1115		return NULL;
1116
1117	np = of_get_cpu_node(cpu, NULL);
1118	if (np == NULL)
1119		return NULL;
1120
1121	cache = of_find_next_cache_node(np);
1122
1123	of_node_put(np);
1124
1125	return cache;
1126}
1127
1128static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
1129{
1130	struct device_node *l2_cache, *np;
1131	int i;
1132
1133	l2_cache = cpu_to_l2cache(cpu);
1134	if (!l2_cache)
1135		return false;
1136
1137	for_each_cpu(i, cpu_online_mask) {
1138		/*
1139		 * When updating the masks the current CPU has not been marked
1140		 * online yet, but we still need to update the cache masks.
1141		 */
1142		np = cpu_to_l2cache(i);
1143		if (!np)
1144			continue;
1145
1146		if (np == l2_cache)
1147			set_cpus_related(cpu, i, mask_fn);
1148
1149		of_node_put(np);
1150	}
1151	of_node_put(l2_cache);
1152
1153	return true;
1154}
1155
1156#ifdef CONFIG_HOTPLUG_CPU
1157static void remove_cpu_from_masks(int cpu)
1158{
1159	int i;
1160
1161	/* NB: cpu_core_mask is a superset of the others */
1162	for_each_cpu(i, cpu_core_mask(cpu)) {
1163		set_cpus_unrelated(cpu, i, cpu_core_mask);
1164		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1165		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1166		if (has_big_cores)
1167			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1168	}
1169}
1170#endif
1171
1172static inline void add_cpu_to_smallcore_masks(int cpu)
1173{
1174	struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
1175	int i, first_thread = cpu_first_thread_sibling(cpu);
1176
1177	if (!has_big_cores)
1178		return;
1179
1180	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1181
1182	for (i = first_thread; i < first_thread + threads_per_core; i++) {
1183		if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
1184			set_cpus_related(i, cpu, cpu_smallcore_mask);
1185	}
1186}
1187
1188static void add_cpu_to_masks(int cpu)
1189{
1190	int first_thread = cpu_first_thread_sibling(cpu);
1191	int chipid = cpu_to_chip_id(cpu);
1192	int i;
1193
1194	/*
1195	 * This CPU will not be in the online mask yet so we need to manually
1196	 * add it to its own thread sibling mask.
1197	 */
1198	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1199
1200	for (i = first_thread; i < first_thread + threads_per_core; i++)
1201		if (cpu_online(i))
1202			set_cpus_related(i, cpu, cpu_sibling_mask);
1203
1204	add_cpu_to_smallcore_masks(cpu);
1205	/*
1206	 * Copy the thread sibling mask into the cache sibling mask
1207	 * and mark any CPUs that share an L2 with this CPU.
1208	 */
1209	for_each_cpu(i, cpu_sibling_mask(cpu))
1210		set_cpus_related(cpu, i, cpu_l2_cache_mask);
1211	update_mask_by_l2(cpu, cpu_l2_cache_mask);
1212
1213	/*
1214	 * Copy the cache sibling mask into core sibling mask and mark
1215	 * any CPUs on the same chip as this CPU.
1216	 */
1217	for_each_cpu(i, cpu_l2_cache_mask(cpu))
1218		set_cpus_related(cpu, i, cpu_core_mask);
1219
1220	if (chipid == -1)
1221		return;
1222
1223	for_each_cpu(i, cpu_online_mask)
1224		if (cpu_to_chip_id(i) == chipid)
1225			set_cpus_related(cpu, i, cpu_core_mask);
1226}
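
    /*
     * After add_cpu_to_masks() the per-cpu masks nest: cpu_smallcore_mask
     * is a subset of cpu_sibling_mask, which is a subset of
     * cpu_l2_cache_mask, which is a subset of cpu_core_mask.  That is why
     * remove_cpu_from_masks() above only needs to iterate cpu_core_mask to
     * undo all of them.
     */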
1227
1228static bool shared_caches;
1229
1230/* Activate a secondary processor. */
1231void start_secondary(void *unused)
1232{
1233	unsigned int cpu = smp_processor_id();
1234	struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1235
1236	mmgrab(&init_mm);
1237	current->active_mm = &init_mm;
1238
1239	smp_store_cpu_info(cpu);
1240	set_dec(tb_ticks_per_jiffy);
1241	preempt_disable();
1242	cpu_callin_map[cpu] = 1;
1243
1244	if (smp_ops->setup_cpu)
1245		smp_ops->setup_cpu(cpu);
1246	if (smp_ops->take_timebase)
1247		smp_ops->take_timebase();
1248
1249	secondary_cpu_time_init();
1250
1251#ifdef CONFIG_PPC64
1252	if (system_state == SYSTEM_RUNNING)
1253		vdso_data->processorCount++;
1254
1255	vdso_getcpu_init();
1256#endif
1257	/* Update topology CPU masks */
1258	add_cpu_to_masks(cpu);
1259
1260	if (has_big_cores)
1261		sibling_mask = cpu_smallcore_mask;
1262	/*
1263	 * Check for any shared caches. Note that this must be done on a
1264	 * per-core basis because one core in the pair might be disabled.
1265	 */
1266	if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
1267		shared_caches = true;
1268
1269	set_numa_node(numa_cpu_lookup_table[cpu]);
1270	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1271
1272	smp_wmb();
1273	notify_cpu_starting(cpu);
1274	set_cpu_online(cpu, true);
1275
1276	boot_init_stack_canary();
1277
1278	local_irq_enable();
1279
1280	/* We can enable ftrace for secondary cpus now */
1281	this_cpu_enable_ftrace();
1282
1283	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1284
1285	BUG();
1286}
1287
1288int setup_profiling_timer(unsigned int multiplier)
1289{
1290	return 0;
1291}
1292
1293#ifdef CONFIG_SCHED_SMT
1294/* cpumask of CPUs with asymmetric SMT dependency */
1295static int powerpc_smt_flags(void)
1296{
1297	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1298
1299	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
1300		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
1301		flags |= SD_ASYM_PACKING;
1302	}
1303	return flags;
1304}
1305#endif
1306
1307static struct sched_domain_topology_level powerpc_topology[] = {
1308#ifdef CONFIG_SCHED_SMT
1309	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1310#endif
1311	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1312	{ NULL, },
1313};
1314
1315/*
1316 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1317 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1318 * since the migrated task remains cache hot. We want to take advantage of this
1319 * at the scheduler level so an extra topology level is required.
1320 */
1321static int powerpc_shared_cache_flags(void)
1322{
1323	return SD_SHARE_PKG_RESOURCES;
1324}
1325
1326/*
1327 * We can't just pass cpu_l2_cache_mask() directly because it
1328 * returns a non-const pointer and the compiler barfs on that.
1329 */
1330static const struct cpumask *shared_cache_mask(int cpu)
1331{
1332	return cpu_l2_cache_mask(cpu);
1333}
1334
1335#ifdef CONFIG_SCHED_SMT
1336static const struct cpumask *smallcore_smt_mask(int cpu)
1337{
1338	return cpu_smallcore_mask(cpu);
1339}
1340#endif
1341
1342static struct sched_domain_topology_level power9_topology[] = {
1343#ifdef CONFIG_SCHED_SMT
1344	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1345#endif
1346	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
1347	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1348	{ NULL, },
1349};
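
    /*
     * Two candidate scheduler topologies are defined above: powerpc_topology
     * (SMT -> DIE) and power9_topology (SMT -> CACHE -> DIE).
     * smp_cpus_done() below selects power9_topology only when
     * start_secondary() found an L2 that is shared beyond the (small-)core
     * siblings and set shared_caches.
     */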
1350
1351void __init smp_cpus_done(unsigned int max_cpus)
1352{
1353	/*
1354	 * We are running pinned to the boot CPU, see rest_init().
1355	 */
1356	if (smp_ops && smp_ops->setup_cpu)
1357		smp_ops->setup_cpu(boot_cpuid);
1358
1359	if (smp_ops && smp_ops->bringup_done)
1360		smp_ops->bringup_done();
1361
1362	/*
1363	 * On a shared LPAR, associativity needs to be requested.
1364	 * Hence, get numa topology before dumping cpu topology
1365	 */
1366	shared_proc_topology_init();
1367	dump_numa_cpu_topology();
1368
1369#ifdef CONFIG_SCHED_SMT
1370	if (has_big_cores) {
1371		pr_info("Using small cores at SMT level\n");
1372		power9_topology[0].mask = smallcore_smt_mask;
1373		powerpc_topology[0].mask = smallcore_smt_mask;
1374	}
1375#endif
1376	/*
1377	 * If any CPU detects that it's sharing a cache with another CPU then
1378	 * use the deeper topology that is aware of this sharing.
1379	 */
1380	if (shared_caches) {
1381		pr_info("Using shared cache scheduler topology\n");
1382		set_sched_topology(power9_topology);
1383	} else {
1384		pr_info("Using standard scheduler topology\n");
1385		set_sched_topology(powerpc_topology);
1386	}
1387}
1388
1389#ifdef CONFIG_HOTPLUG_CPU
1390int __cpu_disable(void)
1391{
1392	int cpu = smp_processor_id();
1393	int err;
1394
1395	if (!smp_ops->cpu_disable)
1396		return -ENOSYS;
1397
1398	this_cpu_disable_ftrace();
1399
1400	err = smp_ops->cpu_disable();
1401	if (err)
1402		return err;
1403
1404	/* Update sibling maps */
1405	remove_cpu_from_masks(cpu);
1406
1407	return 0;
1408}
1409
1410void __cpu_die(unsigned int cpu)
1411{
1412	if (smp_ops->cpu_die)
1413		smp_ops->cpu_die(cpu);
1414}
1415
1416void cpu_die(void)
1417{
1418	/*
1419	 * Disable on the down path. This will be re-enabled by
1420	 * start_secondary() via start_secondary_resume() below
1421	 */
1422	this_cpu_disable_ftrace();
1423
1424	if (ppc_md.cpu_die)
1425		ppc_md.cpu_die();
1426
1427	/* If we return, we re-enter start_secondary */
1428	start_secondary_resume();
1429}
1430
1431#endif