v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * SMP support for ppc.
   4 *
   5 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
   6 * deal of code from the sparc and intel versions.
   7 *
   8 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
   9 *
  10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
  11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
  12 */
  13
  14#undef DEBUG
  15
  16#include <linux/kernel.h>
  17#include <linux/export.h>
  18#include <linux/sched/mm.h>
  19#include <linux/sched/task_stack.h>
  20#include <linux/sched/topology.h>
  21#include <linux/smp.h>
  22#include <linux/interrupt.h>
  23#include <linux/delay.h>
  24#include <linux/init.h>
  25#include <linux/spinlock.h>
  26#include <linux/cache.h>
  27#include <linux/err.h>
  28#include <linux/device.h>
  29#include <linux/cpu.h>
  30#include <linux/notifier.h>
  31#include <linux/topology.h>
  32#include <linux/profile.h>
  33#include <linux/processor.h>
  34#include <linux/random.h>
  35#include <linux/stackprotector.h>
  36#include <linux/pgtable.h>
  37
  38#include <asm/ptrace.h>
  39#include <linux/atomic.h>
  40#include <asm/irq.h>
  41#include <asm/hw_irq.h>
  42#include <asm/kvm_ppc.h>
  43#include <asm/dbell.h>
  44#include <asm/page.h>
  45#include <asm/prom.h>
  46#include <asm/smp.h>
  47#include <asm/time.h>
  48#include <asm/machdep.h>
  49#include <asm/cputhreads.h>
  50#include <asm/cputable.h>
  51#include <asm/mpic.h>
  52#include <asm/vdso_datapage.h>
  53#ifdef CONFIG_PPC64
  54#include <asm/paca.h>
  55#endif
  56#include <asm/vdso.h>
  57#include <asm/debug.h>
  58#include <asm/kexec.h>
  59#include <asm/asm-prototypes.h>
  60#include <asm/cpu_has_feature.h>
  61#include <asm/ftrace.h>
  62#include <asm/kup.h>
  63
  64#ifdef DEBUG
  65#include <asm/udbg.h>
  66#define DBG(fmt...) udbg_printf(fmt)
  67#else
  68#define DBG(fmt...)
  69#endif
  70
  71#ifdef CONFIG_HOTPLUG_CPU
  72/* State of each CPU during hotplug phases */
  73static DEFINE_PER_CPU(int, cpu_state) = { 0 };
  74#endif
  75
  76struct task_struct *secondary_current;
  77bool has_big_cores;
  78
  79DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
  80DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
  81DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
  82DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
  83
  84EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  85EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
  86EXPORT_PER_CPU_SYMBOL(cpu_core_map);
  87EXPORT_SYMBOL_GPL(has_big_cores);
  88
  89#define MAX_THREAD_LIST_SIZE	8
  90#define THREAD_GROUP_SHARE_L1   1
  91struct thread_groups {
  92	unsigned int property;
  93	unsigned int nr_groups;
  94	unsigned int threads_per_group;
  95	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
  96};
  97
  98/*
   99 * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
  100 * the set of its siblings that share the L1 cache.
 101 */
 102DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);
 103
 104/* SMP operations for this machine */
 105struct smp_ops_t *smp_ops;
 106
 107/* Can't be static due to PowerMac hackery */
 108volatile unsigned int cpu_callin_map[NR_CPUS];
 109
 110int smt_enabled_at_boot = 1;
 111
 112/*
 113 * Returns 1 if the specified cpu should be brought up during boot.
 114 * Used to inhibit booting threads if they've been disabled or
 115 * limited on the command line
 116 */
 117int smp_generic_cpu_bootable(unsigned int nr)
 118{
 119	/* Special case - we inhibit secondary thread startup
 120	 * during boot if the user requests it.
 121	 */
 122	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
 123		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
 124			return 0;
 125		if (smt_enabled_at_boot
 126		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
 127			return 0;
 128	}
 129
 130	return 1;
 131}
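A worked example of the check above (illustration only; smt_enabled_at_boot is set up from the boot command line elsewhere in the kernel, not in this file):

/*
 * Illustration: during boot, on a core with 8 SMT threads,
 *   smt_enabled_at_boot == 0  ->  only thread 0 of each core is bootable;
 *   smt_enabled_at_boot == 2  ->  threads 0 and 1 boot, threads 2..7 do not;
 *   smt_enabled_at_boot == 8  ->  all 8 threads boot.
 */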
 132
 133
 134#ifdef CONFIG_PPC64
 135int smp_generic_kick_cpu(int nr)
 136{
 137	if (nr < 0 || nr >= nr_cpu_ids)
 138		return -EINVAL;
 139
 140	/*
 141	 * The processor is currently spinning, waiting for the
  142	 * cpu_start field to become non-zero. After we set cpu_start,
  143	 * the processor will continue on to secondary_start.
 144	 */
 145	if (!paca_ptrs[nr]->cpu_start) {
 146		paca_ptrs[nr]->cpu_start = 1;
 147		smp_mb();
 148		return 0;
 149	}
 150
 151#ifdef CONFIG_HOTPLUG_CPU
 152	/*
 153	 * Ok it's not there, so it might be soft-unplugged, let's
 154	 * try to bring it back
 155	 */
 156	generic_set_cpu_up(nr);
 157	smp_wmb();
 158	smp_send_reschedule(nr);
 159#endif /* CONFIG_HOTPLUG_CPU */
 160
 161	return 0;
 162}
 163#endif /* CONFIG_PPC64 */
 164
 165static irqreturn_t call_function_action(int irq, void *data)
 166{
 167	generic_smp_call_function_interrupt();
 168	return IRQ_HANDLED;
 169}
 170
 171static irqreturn_t reschedule_action(int irq, void *data)
 172{
 173	scheduler_ipi();
 174	return IRQ_HANDLED;
 175}
 176
 177#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 178static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
 179{
 180	timer_broadcast_interrupt();
 181	return IRQ_HANDLED;
 182}
 183#endif
 184
 185#ifdef CONFIG_NMI_IPI
 186static irqreturn_t nmi_ipi_action(int irq, void *data)
 187{
 188	smp_handle_nmi_ipi(get_irq_regs());
 189	return IRQ_HANDLED;
 190}
 191#endif
 192
 193static irq_handler_t smp_ipi_action[] = {
 194	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
 195	[PPC_MSG_RESCHEDULE] = reschedule_action,
 196#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 197	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
 198#endif
 199#ifdef CONFIG_NMI_IPI
 200	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
 201#endif
 202};
 203
 204/*
 205 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 206 * than going through the call function infrastructure, and strongly
 207 * serialized, so it is more appropriate for debugging.
 208 */
 209const char *smp_ipi_name[] = {
 210	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
 211	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
 212#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 213	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
 214#endif
 215#ifdef CONFIG_NMI_IPI
 216	[PPC_MSG_NMI_IPI] = "nmi ipi",
 217#endif
 218};
 219
 220/* optional function to request ipi, for controllers with >= 4 ipis */
 221int smp_request_message_ipi(int virq, int msg)
 222{
 223	int err;
 224
 225	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
 226		return -EINVAL;
 227#ifndef CONFIG_NMI_IPI
 228	if (msg == PPC_MSG_NMI_IPI)
 229		return 1;
 230#endif
 231
 232	err = request_irq(virq, smp_ipi_action[msg],
 233			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
 234			  smp_ipi_name[msg], NULL);
 235	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
 236		virq, smp_ipi_name[msg], err);
 237
 238	return err;
 239}
 240
 241#ifdef CONFIG_PPC_SMP_MUXED_IPI
 242struct cpu_messages {
 243	long messages;			/* current messages */
 244};
 245static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
 246
 247void smp_muxed_ipi_set_message(int cpu, int msg)
 248{
 249	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 250	char *message = (char *)&info->messages;
 251
 252	/*
 253	 * Order previous accesses before accesses in the IPI handler.
 254	 */
 255	smp_mb();
 256	message[msg] = 1;
 257}
 258
 259void smp_muxed_ipi_message_pass(int cpu, int msg)
 260{
 261	smp_muxed_ipi_set_message(cpu, msg);
 262
 263	/*
 264	 * cause_ipi functions are required to include a full barrier
 265	 * before doing whatever causes the IPI.
 266	 */
 267	smp_ops->cause_ipi(cpu);
 268}
 269
 270#ifdef __BIG_ENDIAN__
 271#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
 272#else
 273#define IPI_MESSAGE(A) (1uL << (8 * (A)))
 274#endif
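The byte-per-message scheme used by smp_muxed_ipi_set_message() above pairs with these IPI_MESSAGE() macros: storing 1 into byte `msg` of the per-cpu `messages` word sets exactly the bit that IPI_MESSAGE(msg) tests, on either endianness. A minimal userspace sketch (illustration only, not part of this file) demonstrating the equivalence:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif

int main(void)
{
	unsigned long messages = 0;
	char *message = (char *)&messages;
	int msg = 2;			/* e.g. PPC_MSG_TICK_BROADCAST */

	message[msg] = 1;		/* what the sending side does */

	/* what the demux side tests; prints 1 on either endianness */
	printf("%d\n", !!(messages & IPI_MESSAGE(msg)));
	return 0;
}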
 275
 276irqreturn_t smp_ipi_demux(void)
 277{
 278	mb();	/* order any irq clear */
 279
 280	return smp_ipi_demux_relaxed();
 281}
 282
 283/* sync-free variant. Callers should ensure synchronization */
 284irqreturn_t smp_ipi_demux_relaxed(void)
 285{
 286	struct cpu_messages *info;
 287	unsigned long all;
 288
 289	info = this_cpu_ptr(&ipi_message);
 290	do {
 291		all = xchg(&info->messages, 0);
 292#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
 293		/*
 294		 * Must check for PPC_MSG_RM_HOST_ACTION messages
 295		 * before PPC_MSG_CALL_FUNCTION messages because when
 296		 * a VM is destroyed, we call kick_all_cpus_sync()
 297		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
 298		 * messages have completed before we free any VCPUs.
 299		 */
 300		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
 301			kvmppc_xics_ipi_action();
 302#endif
 303		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
 304			generic_smp_call_function_interrupt();
 305		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
 306			scheduler_ipi();
 307#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 308		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
 309			timer_broadcast_interrupt();
 310#endif
 311#ifdef CONFIG_NMI_IPI
 312		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
 313			nmi_ipi_action(0, NULL);
 314#endif
 315	} while (info->messages);
 316
 317	return IRQ_HANDLED;
 318}
 319#endif /* CONFIG_PPC_SMP_MUXED_IPI */
 320
 321static inline void do_message_pass(int cpu, int msg)
 322{
 323	if (smp_ops->message_pass)
 324		smp_ops->message_pass(cpu, msg);
 325#ifdef CONFIG_PPC_SMP_MUXED_IPI
 326	else
 327		smp_muxed_ipi_message_pass(cpu, msg);
 328#endif
 329}
 330
 331void smp_send_reschedule(int cpu)
 332{
 333	if (likely(smp_ops))
 334		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 335}
 336EXPORT_SYMBOL_GPL(smp_send_reschedule);
 337
 338void arch_send_call_function_single_ipi(int cpu)
 339{
 340	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 341}
 342
 343void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 344{
 345	unsigned int cpu;
 346
 347	for_each_cpu(cpu, mask)
 348		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 349}
 350
 351#ifdef CONFIG_NMI_IPI
 352
 353/*
 354 * "NMI IPI" system.
 355 *
  356 * NMI IPIs may not be recoverable, so they should not be used as an ongoing part of
 357 * a running system. They can be used for crash, debug, halt/reboot, etc.
 358 *
 359 * The IPI call waits with interrupts disabled until all targets enter the
 360 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 361 * have returned from their handlers, so there is no guarantee about
 362 * concurrency or re-entrancy.
 363 *
 364 * A new NMI can be issued before all targets exit the handler.
 365 *
 366 * The IPI call may time out without all targets entering the NMI handler.
 367 * In that case, there is some logic to recover (and ignore subsequent
 368 * NMI interrupts that may eventually be raised), but the platform interrupt
 369 * handler may not be able to distinguish this from other exception causes,
 370 * which may cause a crash.
 371 */
 372
 373static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
 374static struct cpumask nmi_ipi_pending_mask;
 375static bool nmi_ipi_busy = false;
 376static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
 377
 378static void nmi_ipi_lock_start(unsigned long *flags)
 379{
 380	raw_local_irq_save(*flags);
 381	hard_irq_disable();
 382	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 383		raw_local_irq_restore(*flags);
 384		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 385		raw_local_irq_save(*flags);
 386		hard_irq_disable();
 387	}
 388}
 389
 390static void nmi_ipi_lock(void)
 391{
 392	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
 393		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 394}
 395
 396static void nmi_ipi_unlock(void)
 397{
 398	smp_mb();
 399	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
 400	atomic_set(&__nmi_ipi_lock, 0);
 401}
 402
 403static void nmi_ipi_unlock_end(unsigned long *flags)
 404{
 405	nmi_ipi_unlock();
 406	raw_local_irq_restore(*flags);
 407}
 408
 409/*
 410 * Platform NMI handler calls this to ack
 411 */
 412int smp_handle_nmi_ipi(struct pt_regs *regs)
 413{
 414	void (*fn)(struct pt_regs *) = NULL;
 415	unsigned long flags;
 416	int me = raw_smp_processor_id();
 417	int ret = 0;
 418
 419	/*
 420	 * Unexpected NMIs are possible here because the interrupt may not
 421	 * be able to distinguish NMI IPIs from other types of NMIs, or
 422	 * because the caller may have timed out.
 423	 */
 424	nmi_ipi_lock_start(&flags);
 425	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
 426		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 427		fn = READ_ONCE(nmi_ipi_function);
 428		WARN_ON_ONCE(!fn);
 429		ret = 1;
 430	}
 431	nmi_ipi_unlock_end(&flags);
 432
 433	if (fn)
 434		fn(regs);
 435
 436	return ret;
 437}
 438
 439static void do_smp_send_nmi_ipi(int cpu, bool safe)
 440{
 441	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
 442		return;
 443
 444	if (cpu >= 0) {
 445		do_message_pass(cpu, PPC_MSG_NMI_IPI);
 446	} else {
 447		int c;
 448
 449		for_each_online_cpu(c) {
 450			if (c == raw_smp_processor_id())
 451				continue;
 452			do_message_pass(c, PPC_MSG_NMI_IPI);
 453		}
 454	}
 455}
 456
 457/*
 458 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 459 * - fn is the target callback function.
 460 * - delay_us > 0 is the delay before giving up waiting for targets to
 461 *   begin executing the handler, == 0 specifies indefinite delay.
 462 */
 463static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
 464				u64 delay_us, bool safe)
 465{
 466	unsigned long flags;
 467	int me = raw_smp_processor_id();
 468	int ret = 1;
 469
 470	BUG_ON(cpu == me);
 471	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
 472
 473	if (unlikely(!smp_ops))
 474		return 0;
 475
 476	nmi_ipi_lock_start(&flags);
 477	while (nmi_ipi_busy) {
 478		nmi_ipi_unlock_end(&flags);
 479		spin_until_cond(!nmi_ipi_busy);
 480		nmi_ipi_lock_start(&flags);
 481	}
 482	nmi_ipi_busy = true;
 483	nmi_ipi_function = fn;
 484
 485	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
 486
 487	if (cpu < 0) {
 488		/* ALL_OTHERS */
 489		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
 490		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 491	} else {
 492		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
 493	}
 494
 495	nmi_ipi_unlock();
 496
 497	/* Interrupts remain hard disabled */
 498
 499	do_smp_send_nmi_ipi(cpu, safe);
 500
 501	nmi_ipi_lock();
 502	/* nmi_ipi_busy is set here, so unlock/lock is okay */
 503	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
 504		nmi_ipi_unlock();
 505		udelay(1);
 506		nmi_ipi_lock();
 507		if (delay_us) {
 508			delay_us--;
 509			if (!delay_us)
 510				break;
 511		}
 512	}
 513
 514	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
 515		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
 516		ret = 0;
 517		cpumask_clear(&nmi_ipi_pending_mask);
 518	}
 519
 520	nmi_ipi_function = NULL;
 521	nmi_ipi_busy = false;
 522
 523	nmi_ipi_unlock_end(&flags);
 524
 525	return ret;
 526}
 527
 528int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 529{
 530	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
 531}
 532
 533int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 534{
 535	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
 536}
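For reference, a sketch of the typical calling pattern (the handler and caller names here are hypothetical; the real in-tree users are smp_send_debugger_break(), crash_send_ipi() and smp_send_stop() below):

static void example_nmi_handler(struct pt_regs *regs)
{
	/* Runs on each target CPU in NMI context, interrupts hard-disabled. */
}

static void example_caller(void)
{
	/* IPI all other CPUs; give up after ~1s if a target never checks in. */
	if (!smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, example_nmi_handler, 1000000))
		pr_warn("some CPUs did not enter the NMI handler\n");
}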
 537#endif /* CONFIG_NMI_IPI */
 538
 539#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 540void tick_broadcast(const struct cpumask *mask)
 541{
 542	unsigned int cpu;
 543
 544	for_each_cpu(cpu, mask)
 545		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
 546}
 547#endif
 548
 549#ifdef CONFIG_DEBUGGER
 550void debugger_ipi_callback(struct pt_regs *regs)
 551{
 552	debugger_ipi(regs);
 553}
 554
 555void smp_send_debugger_break(void)
 556{
 557	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
 558}
 559#endif
 560
 561#ifdef CONFIG_KEXEC_CORE
 562void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 563{
 564	int cpu;
 565
 566	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
 567	if (kdump_in_progress() && crash_wake_offline) {
 568		for_each_present_cpu(cpu) {
 569			if (cpu_online(cpu))
 570				continue;
 571			/*
 572			 * crash_ipi_callback will wait for
 573			 * all cpus, including offline CPUs.
 574			 * We don't care about nmi_ipi_function.
 575			 * Offline cpus will jump straight into
 576			 * crash_ipi_callback, we can skip the
 577			 * entire NMI dance and waiting for
 578			 * cpus to clear pending mask, etc.
 579			 */
 580			do_smp_send_nmi_ipi(cpu, false);
 581		}
 582	}
 583}
 584#endif
 585
 586#ifdef CONFIG_NMI_IPI
 587static void nmi_stop_this_cpu(struct pt_regs *regs)
 588{
 589	/*
  590	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
 591	 */
 592	spin_begin();
 593	while (1)
 594		spin_cpu_relax();
 595}
 596
 597void smp_send_stop(void)
 598{
 599	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
 600}
 601
 602#else /* CONFIG_NMI_IPI */
 603
 604static void stop_this_cpu(void *dummy)
 605{
 606	hard_irq_disable();
 607	spin_begin();
 608	while (1)
 609		spin_cpu_relax();
 610}
 611
 612void smp_send_stop(void)
 613{
 614	static bool stopped = false;
 615
 616	/*
 617	 * Prevent waiting on csd lock from a previous smp_send_stop.
 618	 * This is racy, but in general callers try to do the right
 619	 * thing and only fire off one smp_send_stop (e.g., see
 620	 * kernel/panic.c)
 621	 */
 622	if (stopped)
 623		return;
 624
 625	stopped = true;
 626
 627	smp_call_function(stop_this_cpu, NULL, 0);
 628}
 629#endif /* CONFIG_NMI_IPI */
 630
 631struct task_struct *current_set[NR_CPUS];
 632
 633static void smp_store_cpu_info(int id)
 634{
 635	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 636#ifdef CONFIG_PPC_FSL_BOOK3E
 637	per_cpu(next_tlbcam_idx, id)
 638		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
 639#endif
 640}
 641
 642/*
  643 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
  644 * rather than just passing around the cpumask we pass around a function that
  645 * returns that cpumask for the given CPU.
 646 */
 647static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
 648{
 649	cpumask_set_cpu(i, get_cpumask(j));
 650	cpumask_set_cpu(j, get_cpumask(i));
 651}
 652
 653#ifdef CONFIG_HOTPLUG_CPU
 654static void set_cpus_unrelated(int i, int j,
 655		struct cpumask *(*get_cpumask)(int))
 656{
 657	cpumask_clear_cpu(i, get_cpumask(j));
 658	cpumask_clear_cpu(j, get_cpumask(i));
 659}
 660#endif
 661
 662/*
 663 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 664 *                      property for the CPU device node @dn and stores
 665 *                      the parsed output in the thread_groups
 666 *                      structure @tg if the ibm,thread-groups[0]
 667 *                      matches @property.
 668 *
 669 * @dn: The device node of the CPU device.
 670 * @tg: Pointer to a thread group structure into which the parsed
 671 *      output of "ibm,thread-groups" is stored.
 672 * @property: The property of the thread-group that the caller is
 673 *            interested in.
 674 *
 675 * ibm,thread-groups[0..N-1] array defines which group of threads in
 676 * the CPU-device node can be grouped together based on the property.
 677 *
 678 * ibm,thread-groups[0] tells us the property based on which the
 679 * threads are being grouped together. If this value is 1, it implies
  680 * that the threads in the same group share the L1 cache and the translation cache.
 681 *
 682 * ibm,thread-groups[1] tells us how many such thread groups exist.
 683 *
 684 * ibm,thread-groups[2] tells us the number of threads in each such
 685 * group.
 686 *
 687 * ibm,thread-groups[3..N-1] is the list of threads identified by
 688 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 689 * the grouping.
 690 *
 691 * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
 692 * implies that there are 2 groups of 4 threads each, where each group
  693 * of threads shares the L1 cache and the translation cache.
 694 *
 695 * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
 696 * and the "ibm,ppc-interrupt-server#s" of the second group is {9, 10,
  697 * 11, 12}, respectively.
 698 *
 699 * Returns 0 on success, -EINVAL if the property does not exist,
 700 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 701 * property data isn't large enough.
 702 */
 703static int parse_thread_groups(struct device_node *dn,
 704			       struct thread_groups *tg,
 705			       unsigned int property)
 706{
 707	int i;
 708	u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
 709	u32 *thread_list;
 710	size_t total_threads;
 711	int ret;
 712
 713	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
 714					 thread_group_array, 3);
 715	if (ret)
 716		return ret;
 717
 718	tg->property = thread_group_array[0];
 719	tg->nr_groups = thread_group_array[1];
 720	tg->threads_per_group = thread_group_array[2];
 721	if (tg->property != property ||
 722	    tg->nr_groups < 1 ||
 723	    tg->threads_per_group < 1)
 724		return -ENODATA;
 725
 726	total_threads = tg->nr_groups * tg->threads_per_group;
 727
 728	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
 729					 thread_group_array,
 730					 3 + total_threads);
 731	if (ret)
 732		return ret;
 733
 734	thread_list = &thread_group_array[3];
 735
 736	for (i = 0 ; i < total_threads; i++)
 737		tg->thread_list[i] = thread_list[i];
 738
 739	return 0;
 740}
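A worked decode of the example array from the comment above, as a standalone userspace sketch (illustration only; the kernel path reads the property via of_property_read_u32_array() as shown in parse_thread_groups()):

#include <stdio.h>

int main(void)
{
	/* ibm,thread-groups = [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12] */
	unsigned int tga[] = { 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
	unsigned int property = tga[0];		 /* 1 == THREAD_GROUP_SHARE_L1 */
	unsigned int nr_groups = tga[1];	 /* 2 groups */
	unsigned int threads_per_group = tga[2]; /* 4 threads per group */
	unsigned int *thread_list = &tga[3];
	unsigned int g, t;

	for (g = 0; g < nr_groups; g++) {
		printf("group %u (property %u):", g, property);
		for (t = 0; t < threads_per_group; t++)
			printf(" %u", thread_list[g * threads_per_group + t]);
		printf("\n");
	}
	/* prints:  group 0 (property 1): 5 6 7 8
	 *          group 1 (property 1): 9 10 11 12 */
	return 0;
}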
 741
 742/*
 743 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 744 *                              that @cpu belongs to.
 745 *
 746 * @cpu : The logical CPU whose thread group is being searched.
 747 * @tg : The thread-group structure of the CPU node which @cpu belongs
 748 *       to.
 749 *
  750 * Returns the index to tg->thread_list that points to the start
 751 * of the thread_group that @cpu belongs to.
 752 *
 753 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 754 * tg->thread_list.
 755 */
 756static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
 757{
 758	int hw_cpu_id = get_hard_smp_processor_id(cpu);
 759	int i, j;
 760
 761	for (i = 0; i < tg->nr_groups; i++) {
 762		int group_start = i * tg->threads_per_group;
 763
 764		for (j = 0; j < tg->threads_per_group; j++) {
 765			int idx = group_start + j;
 766
 767			if (tg->thread_list[idx] == hw_cpu_id)
 768				return group_start;
 769		}
 770	}
 771
 772	return -1;
 773}
 774
 775static int init_cpu_l1_cache_map(int cpu)
 776
 777{
 778	struct device_node *dn = of_get_cpu_node(cpu, NULL);
 779	struct thread_groups tg = {.property = 0,
 780				   .nr_groups = 0,
 781				   .threads_per_group = 0};
 782	int first_thread = cpu_first_thread_sibling(cpu);
 783	int i, cpu_group_start = -1, err = 0;
 784
 785	if (!dn)
 786		return -ENODATA;
 787
 788	err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
 789	if (err)
 790		goto out;
 791
 792	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
 793				GFP_KERNEL,
 794				cpu_to_node(cpu));
 795
 796	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
 797
 798	if (unlikely(cpu_group_start == -1)) {
 799		WARN_ON_ONCE(1);
 800		err = -ENODATA;
 801		goto out;
 802	}
 803
 804	for (i = first_thread; i < first_thread + threads_per_core; i++) {
 805		int i_group_start = get_cpu_thread_group_start(i, &tg);
 806
 807		if (unlikely(i_group_start == -1)) {
 808			WARN_ON_ONCE(1);
 809			err = -ENODATA;
 810			goto out;
 811		}
 812
 813		if (i_group_start == cpu_group_start)
 814			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
 815	}
 816
 817out:
 818	of_node_put(dn);
 819	return err;
 820}
 821
 822static int init_big_cores(void)
 823{
 824	int cpu;
 825
 826	for_each_possible_cpu(cpu) {
 827		int err = init_cpu_l1_cache_map(cpu);
 828
 829		if (err)
 830			return err;
 831
 832		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
 833					GFP_KERNEL,
 834					cpu_to_node(cpu));
 835	}
 836
 837	has_big_cores = true;
 838	return 0;
 839}
 840
 841void __init smp_prepare_cpus(unsigned int max_cpus)
 842{
 843	unsigned int cpu;
 844
 845	DBG("smp_prepare_cpus\n");
 846
  847	/*
  848	 * setup_cpu may need to be called on the boot cpu. We haven't
  849	 * spun any cpus up yet, but let's be paranoid.
 850	 */
 851	BUG_ON(boot_cpuid != smp_processor_id());
 852
 853	/* Fixup boot cpu */
 854	smp_store_cpu_info(boot_cpuid);
 855	cpu_callin_map[boot_cpuid] = 1;
 856
 857	for_each_possible_cpu(cpu) {
 858		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
 859					GFP_KERNEL, cpu_to_node(cpu));
 860		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
 861					GFP_KERNEL, cpu_to_node(cpu));
 862		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
 863					GFP_KERNEL, cpu_to_node(cpu));
 864		/*
 865		 * numa_node_id() works after this.
 866		 */
 867		if (cpu_present(cpu)) {
 868			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
 869			set_cpu_numa_mem(cpu,
 870				local_memory_node(numa_cpu_lookup_table[cpu]));
 871		}
 872	}
 873
 874	/* Init the cpumasks so the boot CPU is related to itself */
 875	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
 876	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
 877	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
 878
 879	init_big_cores();
 880	if (has_big_cores) {
 881		cpumask_set_cpu(boot_cpuid,
 882				cpu_smallcore_mask(boot_cpuid));
 883	}
 884
 885	if (smp_ops && smp_ops->probe)
 886		smp_ops->probe();
 887}
 888
 889void smp_prepare_boot_cpu(void)
 890{
 891	BUG_ON(smp_processor_id() != boot_cpuid);
 892#ifdef CONFIG_PPC64
 893	paca_ptrs[boot_cpuid]->__current = current;
 894#endif
 895	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
 896	current_set[boot_cpuid] = current;
 897}
 898
 899#ifdef CONFIG_HOTPLUG_CPU
 900
 901int generic_cpu_disable(void)
 902{
 903	unsigned int cpu = smp_processor_id();
 904
 905	if (cpu == boot_cpuid)
 906		return -EBUSY;
 907
 908	set_cpu_online(cpu, false);
 909#ifdef CONFIG_PPC64
 910	vdso_data->processorCount--;
 911#endif
 912	/* Update affinity of all IRQs previously aimed at this CPU */
 913	irq_migrate_all_off_this_cpu();
 914
 915	/*
 916	 * Depending on the details of the interrupt controller, it's possible
 917	 * that one of the interrupts we just migrated away from this CPU is
 918	 * actually already pending on this CPU. If we leave it in that state
 919	 * the interrupt will never be EOI'ed, and will never fire again. So
 920	 * temporarily enable interrupts here, to allow any pending interrupt to
 921	 * be received (and EOI'ed), before we take this CPU offline.
 922	 */
 923	local_irq_enable();
 924	mdelay(1);
 925	local_irq_disable();
 926
 927	return 0;
 928}
 929
 930void generic_cpu_die(unsigned int cpu)
 931{
 932	int i;
 933
 934	for (i = 0; i < 100; i++) {
 935		smp_rmb();
 936		if (is_cpu_dead(cpu))
 937			return;
 938		msleep(100);
 939	}
 940	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
 941}
 942
 943void generic_set_cpu_dead(unsigned int cpu)
 944{
 945	per_cpu(cpu_state, cpu) = CPU_DEAD;
 946}
 947
 948/*
 949 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 950 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 951 * which makes the delay in generic_cpu_die() not happen.
 952 */
 953void generic_set_cpu_up(unsigned int cpu)
 954{
 955	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 956}
 957
 958int generic_check_cpu_restart(unsigned int cpu)
 959{
 960	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
 961}
 962
 963int is_cpu_dead(unsigned int cpu)
 964{
 965	return per_cpu(cpu_state, cpu) == CPU_DEAD;
 966}
 967
 968static bool secondaries_inhibited(void)
 969{
 970	return kvm_hv_mode_active();
 971}
 972
 973#else /* HOTPLUG_CPU */
 974
 975#define secondaries_inhibited()		0
 976
 977#endif
 978
 979static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
 980{
 981#ifdef CONFIG_PPC64
 982	paca_ptrs[cpu]->__current = idle;
 983	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
 984				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
 985#endif
 986	idle->cpu = cpu;
 987	secondary_current = current_set[cpu] = idle;
 988}
 989
 990int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 991{
 992	int rc, c;
 993
 994	/*
 995	 * Don't allow secondary threads to come online if inhibited
 996	 */
 997	if (threads_per_core > 1 && secondaries_inhibited() &&
 998	    cpu_thread_in_subcore(cpu))
 999		return -EBUSY;
1000
1001	if (smp_ops == NULL ||
1002	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1003		return -EINVAL;
1004
1005	cpu_idle_thread_init(cpu, tidle);
1006
1007	/*
1008	 * The platform might need to allocate resources prior to bringing
1009	 * up the CPU
1010	 */
1011	if (smp_ops->prepare_cpu) {
1012		rc = smp_ops->prepare_cpu(cpu);
1013		if (rc)
1014			return rc;
1015	}
1016
 1017	/* Make sure the callin-map entry is 0 (it can be left over from a
 1018	 * previous CPU hotplug).
1019	 */
1020	cpu_callin_map[cpu] = 0;
1021
1022	/* The information for processor bringup must
1023	 * be written out to main store before we release
1024	 * the processor.
1025	 */
1026	smp_mb();
1027
1028	/* wake up cpus */
1029	DBG("smp: kicking cpu %d\n", cpu);
1030	rc = smp_ops->kick_cpu(cpu);
1031	if (rc) {
1032		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1033		return rc;
1034	}
1035
1036	/*
1037	 * wait to see if the cpu made a callin (is actually up).
1038	 * use this value that I found through experimentation.
1039	 * -- Cort
1040	 */
1041	if (system_state < SYSTEM_RUNNING)
1042		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
1043			udelay(100);
1044#ifdef CONFIG_HOTPLUG_CPU
1045	else
1046		/*
1047		 * CPUs can take much longer to come up in the
1048		 * hotplug case.  Wait five seconds.
1049		 */
1050		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
1051			msleep(1);
1052#endif
1053
1054	if (!cpu_callin_map[cpu]) {
1055		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1056		return -ENOENT;
1057	}
1058
1059	DBG("Processor %u found.\n", cpu);
1060
1061	if (smp_ops->give_timebase)
1062		smp_ops->give_timebase();
1063
1064	/* Wait until cpu puts itself in the online & active maps */
1065	spin_until_cond(cpu_online(cpu));
1066
1067	return 0;
1068}
1069
1070/* Return the value of the reg property corresponding to the given
1071 * logical cpu.
1072 */
1073int cpu_to_core_id(int cpu)
1074{
1075	struct device_node *np;
1076	const __be32 *reg;
1077	int id = -1;
1078
1079	np = of_get_cpu_node(cpu, NULL);
1080	if (!np)
1081		goto out;
1082
1083	reg = of_get_property(np, "reg", NULL);
1084	if (!reg)
1085		goto out;
1086
1087	id = be32_to_cpup(reg);
1088out:
1089	of_node_put(np);
1090	return id;
1091}
1092EXPORT_SYMBOL_GPL(cpu_to_core_id);
1093
1094/* Helper routines for cpu to core mapping */
1095int cpu_core_index_of_thread(int cpu)
1096{
1097	return cpu >> threads_shift;
1098}
1099EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1100
1101int cpu_first_thread_of_core(int core)
1102{
1103	return core << threads_shift;
1104}
1105EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
1106
1107/* Must be called when no change can occur to cpu_present_mask,
1108 * i.e. during cpu online or offline.
1109 */
1110static struct device_node *cpu_to_l2cache(int cpu)
1111{
1112	struct device_node *np;
1113	struct device_node *cache;
1114
1115	if (!cpu_present(cpu))
1116		return NULL;
1117
1118	np = of_get_cpu_node(cpu, NULL);
1119	if (np == NULL)
1120		return NULL;
1121
1122	cache = of_find_next_cache_node(np);
1123
1124	of_node_put(np);
1125
1126	return cache;
1127}
1128
1129static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
1130{
1131	struct device_node *l2_cache, *np;
1132	int i;
1133
1134	l2_cache = cpu_to_l2cache(cpu);
1135	if (!l2_cache)
1136		return false;
1137
1138	for_each_cpu(i, cpu_online_mask) {
1139		/*
 1140		 * When updating the masks, the current CPU has not yet been
 1141		 * marked online, but we still need to update its cache masks.
1142		 */
1143		np = cpu_to_l2cache(i);
1144		if (!np)
1145			continue;
1146
1147		if (np == l2_cache)
1148			set_cpus_related(cpu, i, mask_fn);
1149
1150		of_node_put(np);
1151	}
1152	of_node_put(l2_cache);
1153
1154	return true;
1155}
1156
1157#ifdef CONFIG_HOTPLUG_CPU
1158static void remove_cpu_from_masks(int cpu)
1159{
1160	int i;
1161
1162	/* NB: cpu_core_mask is a superset of the others */
1163	for_each_cpu(i, cpu_core_mask(cpu)) {
1164		set_cpus_unrelated(cpu, i, cpu_core_mask);
1165		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1166		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1167		if (has_big_cores)
1168			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1169	}
1170}
1171#endif
1172
1173static inline void add_cpu_to_smallcore_masks(int cpu)
1174{
1175	struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
1176	int i, first_thread = cpu_first_thread_sibling(cpu);
1177
1178	if (!has_big_cores)
1179		return;
1180
1181	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1182
1183	for (i = first_thread; i < first_thread + threads_per_core; i++) {
1184		if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
1185			set_cpus_related(i, cpu, cpu_smallcore_mask);
1186	}
1187}
1188
1189int get_physical_package_id(int cpu)
1190{
1191	int pkg_id = cpu_to_chip_id(cpu);
1192
1193	/*
1194	 * If the platform is PowerNV or Guest on KVM, ibm,chip-id is
1195	 * defined. Hence we would return the chip-id as the result of
1196	 * get_physical_package_id.
1197	 */
1198	if (pkg_id == -1 && firmware_has_feature(FW_FEATURE_LPAR) &&
1199	    IS_ENABLED(CONFIG_PPC_SPLPAR)) {
1200		struct device_node *np = of_get_cpu_node(cpu, NULL);
1201		pkg_id = of_node_to_nid(np);
1202		of_node_put(np);
1203	}
1204
1205	return pkg_id;
1206}
1207EXPORT_SYMBOL_GPL(get_physical_package_id);
1208
1209static void add_cpu_to_masks(int cpu)
1210{
1211	int first_thread = cpu_first_thread_sibling(cpu);
1212	int pkg_id = get_physical_package_id(cpu);
1213	int i;
1214
1215	/*
1216	 * This CPU will not be in the online mask yet so we need to manually
 1217	 * add it to its own thread sibling mask.
1218	 */
1219	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1220
1221	for (i = first_thread; i < first_thread + threads_per_core; i++)
1222		if (cpu_online(i))
1223			set_cpus_related(i, cpu, cpu_sibling_mask);
1224
1225	add_cpu_to_smallcore_masks(cpu);
1226	/*
1227	 * Copy the thread sibling mask into the cache sibling mask
1228	 * and mark any CPUs that share an L2 with this CPU.
1229	 */
1230	for_each_cpu(i, cpu_sibling_mask(cpu))
1231		set_cpus_related(cpu, i, cpu_l2_cache_mask);
1232	update_mask_by_l2(cpu, cpu_l2_cache_mask);
1233
1234	/*
1235	 * Copy the cache sibling mask into core sibling mask and mark
1236	 * any CPUs on the same chip as this CPU.
1237	 */
1238	for_each_cpu(i, cpu_l2_cache_mask(cpu))
1239		set_cpus_related(cpu, i, cpu_core_mask);
1240
1241	if (pkg_id == -1)
1242		return;
1243
1244	for_each_cpu(i, cpu_online_mask)
1245		if (get_physical_package_id(i) == pkg_id)
1246			set_cpus_related(cpu, i, cpu_core_mask);
1247}
1248
1249static bool shared_caches;
1250
1251/* Activate a secondary processor. */
1252void start_secondary(void *unused)
1253{
1254	unsigned int cpu = smp_processor_id();
1255	struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1256
1257	mmgrab(&init_mm);
1258	current->active_mm = &init_mm;
1259
1260	smp_store_cpu_info(cpu);
1261	set_dec(tb_ticks_per_jiffy);
1262	preempt_disable();
1263	cpu_callin_map[cpu] = 1;
1264
1265	if (smp_ops->setup_cpu)
1266		smp_ops->setup_cpu(cpu);
1267	if (smp_ops->take_timebase)
1268		smp_ops->take_timebase();
1269
1270	secondary_cpu_time_init();
1271
1272#ifdef CONFIG_PPC64
1273	if (system_state == SYSTEM_RUNNING)
1274		vdso_data->processorCount++;
1275
1276	vdso_getcpu_init();
1277#endif
1278	/* Update topology CPU masks */
1279	add_cpu_to_masks(cpu);
1280
1281	if (has_big_cores)
1282		sibling_mask = cpu_smallcore_mask;
1283	/*
1284	 * Check for any shared caches. Note that this must be done on a
1285	 * per-core basis because one core in the pair might be disabled.
1286	 */
1287	if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
1288		shared_caches = true;
1289
1290	set_numa_node(numa_cpu_lookup_table[cpu]);
1291	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1292
1293	smp_wmb();
1294	notify_cpu_starting(cpu);
1295	set_cpu_online(cpu, true);
1296
1297	boot_init_stack_canary();
1298
1299	local_irq_enable();
1300
1301	/* We can enable ftrace for secondary cpus now */
1302	this_cpu_enable_ftrace();
1303
1304	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1305
1306	BUG();
1307}
1308
1309int setup_profiling_timer(unsigned int multiplier)
1310{
1311	return 0;
1312}
1313
1314#ifdef CONFIG_SCHED_SMT
 1315/* cpumask of CPUs with asymmetric SMT dependency */
1316static int powerpc_smt_flags(void)
1317{
1318	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1319
1320	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
1321		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
1322		flags |= SD_ASYM_PACKING;
1323	}
1324	return flags;
1325}
1326#endif
1327
1328static struct sched_domain_topology_level powerpc_topology[] = {
1329#ifdef CONFIG_SCHED_SMT
1330	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1331#endif
1332	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1333	{ NULL, },
1334};
1335
1336/*
1337 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1338 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1339 * since the migrated task remains cache hot. We want to take advantage of this
1340 * at the scheduler level so an extra topology level is required.
1341 */
1342static int powerpc_shared_cache_flags(void)
1343{
1344	return SD_SHARE_PKG_RESOURCES;
1345}
1346
1347/*
1348 * We can't just pass cpu_l2_cache_mask() directly because
 1349 * it returns a non-const pointer and the compiler barfs on that.
1350 */
1351static const struct cpumask *shared_cache_mask(int cpu)
1352{
1353	return cpu_l2_cache_mask(cpu);
1354}
1355
1356#ifdef CONFIG_SCHED_SMT
1357static const struct cpumask *smallcore_smt_mask(int cpu)
1358{
1359	return cpu_smallcore_mask(cpu);
1360}
1361#endif
1362
1363static struct sched_domain_topology_level power9_topology[] = {
1364#ifdef CONFIG_SCHED_SMT
1365	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1366#endif
1367	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
1368	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1369	{ NULL, },
1370};
1371
1372void __init smp_cpus_done(unsigned int max_cpus)
1373{
1374	/*
1375	 * We are running pinned to the boot CPU, see rest_init().
1376	 */
1377	if (smp_ops && smp_ops->setup_cpu)
1378		smp_ops->setup_cpu(boot_cpuid);
1379
1380	if (smp_ops && smp_ops->bringup_done)
1381		smp_ops->bringup_done();
1382
1383	dump_numa_cpu_topology();
1384
1385#ifdef CONFIG_SCHED_SMT
1386	if (has_big_cores) {
1387		pr_info("Big cores detected but using small core scheduling\n");
1388		power9_topology[0].mask = smallcore_smt_mask;
1389		powerpc_topology[0].mask = smallcore_smt_mask;
1390	}
1391#endif
1392	/*
1393	 * If any CPU detects that it's sharing a cache with another CPU then
1394	 * use the deeper topology that is aware of this sharing.
1395	 */
1396	if (shared_caches) {
1397		pr_info("Using shared cache scheduler topology\n");
1398		set_sched_topology(power9_topology);
1399	} else {
1400		pr_info("Using standard scheduler topology\n");
1401		set_sched_topology(powerpc_topology);
1402	}
1403}
1404
1405#ifdef CONFIG_HOTPLUG_CPU
1406int __cpu_disable(void)
1407{
1408	int cpu = smp_processor_id();
1409	int err;
1410
1411	if (!smp_ops->cpu_disable)
1412		return -ENOSYS;
1413
1414	this_cpu_disable_ftrace();
1415
1416	err = smp_ops->cpu_disable();
1417	if (err)
1418		return err;
1419
1420	/* Update sibling maps */
1421	remove_cpu_from_masks(cpu);
1422
1423	return 0;
1424}
1425
1426void __cpu_die(unsigned int cpu)
1427{
1428	if (smp_ops->cpu_die)
1429		smp_ops->cpu_die(cpu);
1430}
1431
1432void cpu_die(void)
1433{
1434	/*
 1435	 * Disable ftrace on the down path. It will be re-enabled by
 1436	 * start_secondary() via start_secondary_resume() below.
1437	 */
1438	this_cpu_disable_ftrace();
1439
1440	if (ppc_md.cpu_die)
1441		ppc_md.cpu_die();
1442
1443	/* If we return, we re-enter start_secondary */
1444	start_secondary_resume();
1445}
1446
1447#endif
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * SMP support for ppc.
   4 *
   5 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
   6 * deal of code from the sparc and intel versions.
   7 *
   8 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
   9 *
  10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
  11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
  12 */
  13
  14#undef DEBUG
  15
  16#include <linux/kernel.h>
  17#include <linux/export.h>
  18#include <linux/sched/mm.h>
  19#include <linux/sched/task_stack.h>
  20#include <linux/sched/topology.h>
  21#include <linux/smp.h>
  22#include <linux/interrupt.h>
  23#include <linux/delay.h>
  24#include <linux/init.h>
  25#include <linux/spinlock.h>
  26#include <linux/cache.h>
  27#include <linux/err.h>
  28#include <linux/device.h>
  29#include <linux/cpu.h>
  30#include <linux/notifier.h>
  31#include <linux/topology.h>
  32#include <linux/profile.h>
  33#include <linux/processor.h>
  34#include <linux/random.h>
  35#include <linux/stackprotector.h>
 
  36
  37#include <asm/ptrace.h>
  38#include <linux/atomic.h>
  39#include <asm/irq.h>
  40#include <asm/hw_irq.h>
  41#include <asm/kvm_ppc.h>
  42#include <asm/dbell.h>
  43#include <asm/page.h>
  44#include <asm/pgtable.h>
  45#include <asm/prom.h>
  46#include <asm/smp.h>
  47#include <asm/time.h>
  48#include <asm/machdep.h>
  49#include <asm/cputhreads.h>
  50#include <asm/cputable.h>
  51#include <asm/mpic.h>
  52#include <asm/vdso_datapage.h>
  53#ifdef CONFIG_PPC64
  54#include <asm/paca.h>
  55#endif
  56#include <asm/vdso.h>
  57#include <asm/debug.h>
  58#include <asm/kexec.h>
  59#include <asm/asm-prototypes.h>
  60#include <asm/cpu_has_feature.h>
  61#include <asm/ftrace.h>
 
  62
  63#ifdef DEBUG
  64#include <asm/udbg.h>
  65#define DBG(fmt...) udbg_printf(fmt)
  66#else
  67#define DBG(fmt...)
  68#endif
  69
  70#ifdef CONFIG_HOTPLUG_CPU
  71/* State of each CPU during hotplug phases */
  72static DEFINE_PER_CPU(int, cpu_state) = { 0 };
  73#endif
  74
  75struct task_struct *secondary_current;
  76bool has_big_cores;
  77
  78DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
  79DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
  80DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
  81DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
  82
  83EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  84EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
  85EXPORT_PER_CPU_SYMBOL(cpu_core_map);
  86EXPORT_SYMBOL_GPL(has_big_cores);
  87
  88#define MAX_THREAD_LIST_SIZE	8
  89#define THREAD_GROUP_SHARE_L1   1
  90struct thread_groups {
  91	unsigned int property;
  92	unsigned int nr_groups;
  93	unsigned int threads_per_group;
  94	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
  95};
  96
  97/*
  98 * On big-cores system, cpu_l1_cache_map for each CPU corresponds to
  99 * the set its siblings that share the L1-cache.
 100 */
 101DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);
 102
 103/* SMP operations for this machine */
 104struct smp_ops_t *smp_ops;
 105
 106/* Can't be static due to PowerMac hackery */
 107volatile unsigned int cpu_callin_map[NR_CPUS];
 108
 109int smt_enabled_at_boot = 1;
 110
 111/*
 112 * Returns 1 if the specified cpu should be brought up during boot.
 113 * Used to inhibit booting threads if they've been disabled or
 114 * limited on the command line
 115 */
 116int smp_generic_cpu_bootable(unsigned int nr)
 117{
 118	/* Special case - we inhibit secondary thread startup
 119	 * during boot if the user requests it.
 120	 */
 121	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
 122		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
 123			return 0;
 124		if (smt_enabled_at_boot
 125		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
 126			return 0;
 127	}
 128
 129	return 1;
 130}
 131
 132
 133#ifdef CONFIG_PPC64
 134int smp_generic_kick_cpu(int nr)
 135{
 136	if (nr < 0 || nr >= nr_cpu_ids)
 137		return -EINVAL;
 138
 139	/*
 140	 * The processor is currently spinning, waiting for the
 141	 * cpu_start field to become non-zero After we set cpu_start,
 142	 * the processor will continue on to secondary_start
 143	 */
 144	if (!paca_ptrs[nr]->cpu_start) {
 145		paca_ptrs[nr]->cpu_start = 1;
 146		smp_mb();
 147		return 0;
 148	}
 149
 150#ifdef CONFIG_HOTPLUG_CPU
 151	/*
 152	 * Ok it's not there, so it might be soft-unplugged, let's
 153	 * try to bring it back
 154	 */
 155	generic_set_cpu_up(nr);
 156	smp_wmb();
 157	smp_send_reschedule(nr);
 158#endif /* CONFIG_HOTPLUG_CPU */
 159
 160	return 0;
 161}
 162#endif /* CONFIG_PPC64 */
 163
 164static irqreturn_t call_function_action(int irq, void *data)
 165{
 166	generic_smp_call_function_interrupt();
 167	return IRQ_HANDLED;
 168}
 169
 170static irqreturn_t reschedule_action(int irq, void *data)
 171{
 172	scheduler_ipi();
 173	return IRQ_HANDLED;
 174}
 175
 176#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 177static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
 178{
 179	timer_broadcast_interrupt();
 180	return IRQ_HANDLED;
 181}
 182#endif
 183
 184#ifdef CONFIG_NMI_IPI
 185static irqreturn_t nmi_ipi_action(int irq, void *data)
 186{
 187	smp_handle_nmi_ipi(get_irq_regs());
 188	return IRQ_HANDLED;
 189}
 190#endif
 191
 192static irq_handler_t smp_ipi_action[] = {
 193	[PPC_MSG_CALL_FUNCTION] =  call_function_action,
 194	[PPC_MSG_RESCHEDULE] = reschedule_action,
 195#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 196	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
 197#endif
 198#ifdef CONFIG_NMI_IPI
 199	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
 200#endif
 201};
 202
 203/*
 204 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 205 * than going through the call function infrastructure, and strongly
 206 * serialized, so it is more appropriate for debugging.
 207 */
 208const char *smp_ipi_name[] = {
 209	[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
 210	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
 211#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 212	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
 213#endif
 214#ifdef CONFIG_NMI_IPI
 215	[PPC_MSG_NMI_IPI] = "nmi ipi",
 216#endif
 217};
 218
 219/* optional function to request ipi, for controllers with >= 4 ipis */
 220int smp_request_message_ipi(int virq, int msg)
 221{
 222	int err;
 223
 224	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
 225		return -EINVAL;
 226#ifndef CONFIG_NMI_IPI
 227	if (msg == PPC_MSG_NMI_IPI)
 228		return 1;
 229#endif
 230
 231	err = request_irq(virq, smp_ipi_action[msg],
 232			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
 233			  smp_ipi_name[msg], NULL);
 234	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
 235		virq, smp_ipi_name[msg], err);
 236
 237	return err;
 238}
 239
 240#ifdef CONFIG_PPC_SMP_MUXED_IPI
 241struct cpu_messages {
 242	long messages;			/* current messages */
 243};
 244static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
 245
 246void smp_muxed_ipi_set_message(int cpu, int msg)
 247{
 248	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 249	char *message = (char *)&info->messages;
 250
 251	/*
 252	 * Order previous accesses before accesses in the IPI handler.
 253	 */
 254	smp_mb();
 255	message[msg] = 1;
 256}
 257
 258void smp_muxed_ipi_message_pass(int cpu, int msg)
 259{
 260	smp_muxed_ipi_set_message(cpu, msg);
 261
 262	/*
 263	 * cause_ipi functions are required to include a full barrier
 264	 * before doing whatever causes the IPI.
 265	 */
 266	smp_ops->cause_ipi(cpu);
 267}
 268
 269#ifdef __BIG_ENDIAN__
 270#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
 271#else
 272#define IPI_MESSAGE(A) (1uL << (8 * (A)))
 273#endif
 274
 275irqreturn_t smp_ipi_demux(void)
 276{
 277	mb();	/* order any irq clear */
 278
 279	return smp_ipi_demux_relaxed();
 280}
 281
 282/* sync-free variant. Callers should ensure synchronization */
 283irqreturn_t smp_ipi_demux_relaxed(void)
 284{
 285	struct cpu_messages *info;
 286	unsigned long all;
 287
 288	info = this_cpu_ptr(&ipi_message);
 289	do {
 290		all = xchg(&info->messages, 0);
 291#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
 292		/*
 293		 * Must check for PPC_MSG_RM_HOST_ACTION messages
 294		 * before PPC_MSG_CALL_FUNCTION messages because when
 295		 * a VM is destroyed, we call kick_all_cpus_sync()
 296		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
 297		 * messages have completed before we free any VCPUs.
 298		 */
 299		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
 300			kvmppc_xics_ipi_action();
 301#endif
 302		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
 303			generic_smp_call_function_interrupt();
 304		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
 305			scheduler_ipi();
 306#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 307		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
 308			timer_broadcast_interrupt();
 309#endif
 310#ifdef CONFIG_NMI_IPI
 311		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
 312			nmi_ipi_action(0, NULL);
 313#endif
 314	} while (info->messages);
 315
 316	return IRQ_HANDLED;
 317}
 318#endif /* CONFIG_PPC_SMP_MUXED_IPI */
 319
 320static inline void do_message_pass(int cpu, int msg)
 321{
 322	if (smp_ops->message_pass)
 323		smp_ops->message_pass(cpu, msg);
 324#ifdef CONFIG_PPC_SMP_MUXED_IPI
 325	else
 326		smp_muxed_ipi_message_pass(cpu, msg);
 327#endif
 328}
 329
 330void smp_send_reschedule(int cpu)
 331{
 332	if (likely(smp_ops))
 333		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 334}
 335EXPORT_SYMBOL_GPL(smp_send_reschedule);
 336
 337void arch_send_call_function_single_ipi(int cpu)
 338{
 339	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 340}
 341
 342void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 343{
 344	unsigned int cpu;
 345
 346	for_each_cpu(cpu, mask)
 347		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 348}
 349
 350#ifdef CONFIG_NMI_IPI
 351
 352/*
 353 * "NMI IPI" system.
 354 *
 355 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 356 * a running system. They can be used for crash, debug, halt/reboot, etc.
 357 *
 358 * The IPI call waits with interrupts disabled until all targets enter the
 359 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 360 * have returned from their handlers, so there is no guarantee about
 361 * concurrency or re-entrancy.
 362 *
 363 * A new NMI can be issued before all targets exit the handler.
 364 *
 365 * The IPI call may time out without all targets entering the NMI handler.
 366 * In that case, there is some logic to recover (and ignore subsequent
 367 * NMI interrupts that may eventually be raised), but the platform interrupt
 368 * handler may not be able to distinguish this from other exception causes,
 369 * which may cause a crash.
 370 */
 371
 372static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
 373static struct cpumask nmi_ipi_pending_mask;
 374static bool nmi_ipi_busy = false;
 375static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
 376
 377static void nmi_ipi_lock_start(unsigned long *flags)
 378{
 379	raw_local_irq_save(*flags);
 380	hard_irq_disable();
 381	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 382		raw_local_irq_restore(*flags);
 383		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 384		raw_local_irq_save(*flags);
 385		hard_irq_disable();
 386	}
 387}
 388
 389static void nmi_ipi_lock(void)
 390{
 391	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
 392		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
 393}
 394
 395static void nmi_ipi_unlock(void)
 396{
 397	smp_mb();
 398	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
 399	atomic_set(&__nmi_ipi_lock, 0);
 400}
 401
 402static void nmi_ipi_unlock_end(unsigned long *flags)
 403{
 404	nmi_ipi_unlock();
 405	raw_local_irq_restore(*flags);
 406}
 407
 408/*
 409 * Platform NMI handler calls this to ack
 410 */
 411int smp_handle_nmi_ipi(struct pt_regs *regs)
 412{
 413	void (*fn)(struct pt_regs *) = NULL;
 414	unsigned long flags;
 415	int me = raw_smp_processor_id();
 416	int ret = 0;
 417
 418	/*
 419	 * Unexpected NMIs are possible here because the interrupt may not
 420	 * be able to distinguish NMI IPIs from other types of NMIs, or
 421	 * because the caller may have timed out.
 422	 */
 423	nmi_ipi_lock_start(&flags);
 424	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
 425		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 426		fn = READ_ONCE(nmi_ipi_function);
 427		WARN_ON_ONCE(!fn);
 428		ret = 1;
 429	}
 430	nmi_ipi_unlock_end(&flags);
 431
 432	if (fn)
 433		fn(regs);
 434
 435	return ret;
 436}
 437
 438static void do_smp_send_nmi_ipi(int cpu, bool safe)
 439{
 440	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
 441		return;
 442
 443	if (cpu >= 0) {
 444		do_message_pass(cpu, PPC_MSG_NMI_IPI);
 445	} else {
 446		int c;
 447
 448		for_each_online_cpu(c) {
 449			if (c == raw_smp_processor_id())
 450				continue;
 451			do_message_pass(c, PPC_MSG_NMI_IPI);
 452		}
 453	}
 454}
 455
 456/*
 457 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 458 * - fn is the target callback function.
 459 * - delay_us > 0 is the delay before giving up waiting for targets to
 460 *   begin executing the handler, == 0 specifies indefinite delay.
 461 */
 462static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
 463				u64 delay_us, bool safe)
 464{
 465	unsigned long flags;
 466	int me = raw_smp_processor_id();
 467	int ret = 1;
 468
 469	BUG_ON(cpu == me);
 470	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
 471
 472	if (unlikely(!smp_ops))
 473		return 0;
 474
 475	nmi_ipi_lock_start(&flags);
 476	while (nmi_ipi_busy) {
 477		nmi_ipi_unlock_end(&flags);
 478		spin_until_cond(!nmi_ipi_busy);
 479		nmi_ipi_lock_start(&flags);
 480	}
 481	nmi_ipi_busy = true;
 482	nmi_ipi_function = fn;
 483
 484	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
 485
 486	if (cpu < 0) {
 487		/* ALL_OTHERS */
 488		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
 489		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
 490	} else {
 491		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
 492	}
 493
 494	nmi_ipi_unlock();
 495
 496	/* Interrupts remain hard disabled */
 497
 498	do_smp_send_nmi_ipi(cpu, safe);
 499
 500	nmi_ipi_lock();
 501	/* nmi_ipi_busy is set here, so unlock/lock is okay */
 502	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
 503		nmi_ipi_unlock();
 504		udelay(1);
 505		nmi_ipi_lock();
 506		if (delay_us) {
 507			delay_us--;
 508			if (!delay_us)
 509				break;
 510		}
 511	}
 512
 513	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
 514		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
 515		ret = 0;
 516		cpumask_clear(&nmi_ipi_pending_mask);
 517	}
 518
 519	nmi_ipi_function = NULL;
 520	nmi_ipi_busy = false;
 521
 522	nmi_ipi_unlock_end(&flags);
 523
 524	return ret;
 525}
 526
 527int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 528{
 529	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
 530}
 531
 532int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 533{
 534	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
 535}
 536#endif /* CONFIG_NMI_IPI */
 537
 538#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 539void tick_broadcast(const struct cpumask *mask)
 540{
 541	unsigned int cpu;
 542
 543	for_each_cpu(cpu, mask)
 544		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
 545}
 546#endif
 547
 548#ifdef CONFIG_DEBUGGER
 549void debugger_ipi_callback(struct pt_regs *regs)
 550{
 551	debugger_ipi(regs);
 552}
 553
 554void smp_send_debugger_break(void)
 555{
 556	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
 557}
 558#endif
 559
 560#ifdef CONFIG_KEXEC_CORE
 561void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 562{
 563	int cpu;
 564
 565	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
 566	if (kdump_in_progress() && crash_wake_offline) {
 567		for_each_present_cpu(cpu) {
 568			if (cpu_online(cpu))
 569				continue;
 570			/*
 571			 * crash_ipi_callback will wait for
 572			 * all cpus, including offline CPUs.
 573			 * We don't care about nmi_ipi_function.
 574			 * Offline cpus will jump straight into
 575			 * crash_ipi_callback, we can skip the
 576			 * entire NMI dance and waiting for
 577			 * cpus to clear pending mask, etc.
 578			 */
 579			do_smp_send_nmi_ipi(cpu, false);
 580		}
 581	}
 582}
 583#endif
 584
 585#ifdef CONFIG_NMI_IPI
 586static void nmi_stop_this_cpu(struct pt_regs *regs)
 587{
 588	/*
 589	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
 590	 */
 591	spin_begin();
 592	while (1)
 593		spin_cpu_relax();
 594}
 595
 596void smp_send_stop(void)
 597{
 598	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
 599}
 600
 601#else /* CONFIG_NMI_IPI */
 602
 603static void stop_this_cpu(void *dummy)
 604{
 605	hard_irq_disable();
 606	spin_begin();
 607	while (1)
 608		spin_cpu_relax();
 609}
 610
 611void smp_send_stop(void)
 612{
 613	static bool stopped = false;
 614
 615	/*
 616	 * Prevent waiting on csd lock from a previous smp_send_stop.
 617	 * This is racy, but in general callers try to do the right
 618	 * thing and only fire off one smp_send_stop (e.g., see
 619	 * kernel/panic.c)
 620	 */
 621	if (stopped)
 622		return;
 623
 624	stopped = true;
 625
 626	smp_call_function(stop_this_cpu, NULL, 0);
 627}
 628#endif /* CONFIG_NMI_IPI */
 629
 630struct task_struct *current_set[NR_CPUS];
 631
 632static void smp_store_cpu_info(int id)
 633{
 634	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 635#ifdef CONFIG_PPC_FSL_BOOK3E
 636	per_cpu(next_tlbcam_idx, id)
 637		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
 638#endif
 639}
 640
 641/*
 642 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
 643 * rather than just passing around the cpumask we pass around a function that
 644 * returns that cpumask for the given CPU.
 645 */
 646static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
 647{
 648	cpumask_set_cpu(i, get_cpumask(j));
 649	cpumask_set_cpu(j, get_cpumask(i));
 650}
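    /*
     * For instance, set_cpus_related(2, 3, cpu_sibling_mask) sets CPU 3 in
     * CPU 2's sibling mask and CPU 2 in CPU 3's sibling mask; the CPU
     * numbers here are only illustrative.
     */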
 651
 652#ifdef CONFIG_HOTPLUG_CPU
 653static void set_cpus_unrelated(int i, int j,
 654		struct cpumask *(*get_cpumask)(int))
 655{
 656	cpumask_clear_cpu(i, get_cpumask(j));
 657	cpumask_clear_cpu(j, get_cpumask(i));
 658}
 659#endif
 660
 661/*
 662 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 663 *                      property for the CPU device node @dn and stores
 664 *                      the parsed output in the thread_groups
 665 *                      structure @tg if the ibm,thread-groups[0]
 666 *                      matches @property.
 667 *
 668 * @dn: The device node of the CPU device.
 669 * @tg: Pointer to a thread group structure into which the parsed
 670 *      output of "ibm,thread-groups" is stored.
 671 * @property: The property of the thread-group that the caller is
 672 *            interested in.
 673 *
 674 * ibm,thread-groups[0..N-1] array defines which group of threads in
 675 * the CPU-device node can be grouped together based on the property.
 676 *
 677 * ibm,thread-groups[0] tells us the property based on which the
 678 * threads are being grouped together. If this value is 1, it implies
 679 * that the threads in the same group share the L1 and translation cache.
 680 *
 681 * ibm,thread-groups[1] tells us how many such thread groups exist.
 682 *
 683 * ibm,thread-groups[2] tells us the number of threads in each such
 684 * group.
 685 *
 686 * ibm,thread-groups[3..N-1] is the list of threads identified by
 687 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 688 * the grouping.
 689 *
 690 * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
 691 * implies that there are 2 groups of 4 threads each, where each group
 692 * of threads share L1, translation cache.
 693 *
 694 * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
 695 * and the "ibm,ppc-interrupt-server#s" of the second group is {9, 10,
 696 * 11, 12}.
 697 *
 698 * Returns 0 on success, -EINVAL if the property does not exist,
 699 * -ENODATA if the property does not have a value, and -EOVERFLOW if the
 700 * property data isn't large enough.
 701 */
 702static int parse_thread_groups(struct device_node *dn,
 703			       struct thread_groups *tg,
 704			       unsigned int property)
 705{
 706	int i;
 707	u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
 708	u32 *thread_list;
 709	size_t total_threads;
 710	int ret;
 711
 712	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
 713					 thread_group_array, 3);
 714	if (ret)
 715		return ret;
 716
 717	tg->property = thread_group_array[0];
 718	tg->nr_groups = thread_group_array[1];
 719	tg->threads_per_group = thread_group_array[2];
 720	if (tg->property != property ||
 721	    tg->nr_groups < 1 ||
 722	    tg->threads_per_group < 1)
 723		return -ENODATA;
 724
 725	total_threads = tg->nr_groups * tg->threads_per_group;
 726
 727	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
 728					 thread_group_array,
 729					 3 + total_threads);
 730	if (ret)
 731		return ret;
 732
 733	thread_list = &thread_group_array[3];
 734
 735	for (i = 0 ; i < total_threads; i++)
 736		tg->thread_list[i] = thread_list[i];
 737
 738	return 0;
 739}
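    /*
     * For the example above, the first read pulls in {1, 2, 4}, setting
     * tg->property = 1, tg->nr_groups = 2 and tg->threads_per_group = 4;
     * the second read of 3 + 8 cells copies the eight interrupt-server
     * numbers into tg->thread_list[].
     */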
 740
 741/*
 742 * get_cpu_thread_group_start : Searches tg->thread_list for the thread
 743 *                              group that @cpu belongs to.
 744 *
 745 * @cpu : The logical CPU whose thread group is being searched.
 746 * @tg : The thread-group structure of the CPU node which @cpu belongs
 747 *       to.
 748 *
 749 * Returns the index into tg->thread_list that points to the start
 750 * of the thread_group that @cpu belongs to.
 751 *
 752 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 753 * tg->thread_list.
 754 */
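    /*
     * Worked example (reusing the ibm,thread-groups example above): with
     * thread_list = {5,6,7,8,9,10,11,12}, nr_groups = 2 and
     * threads_per_group = 4, a CPU whose hard id is 10 matches index 5,
     * i.e. the second group, so the function returns group_start = 4.
     */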
 755static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
 756{
 757	int hw_cpu_id = get_hard_smp_processor_id(cpu);
 758	int i, j;
 759
 760	for (i = 0; i < tg->nr_groups; i++) {
 761		int group_start = i * tg->threads_per_group;
 762
 763		for (j = 0; j < tg->threads_per_group; j++) {
 764			int idx = group_start + j;
 765
 766			if (tg->thread_list[idx] == hw_cpu_id)
 767				return group_start;
 768		}
 769	}
 770
 771	return -1;
 772}
 773
 774static int init_cpu_l1_cache_map(int cpu)
 775
 776{
 777	struct device_node *dn = of_get_cpu_node(cpu, NULL);
 778	struct thread_groups tg = {.property = 0,
 779				   .nr_groups = 0,
 780				   .threads_per_group = 0};
 781	int first_thread = cpu_first_thread_sibling(cpu);
 782	int i, cpu_group_start = -1, err = 0;
 783
 784	if (!dn)
 785		return -ENODATA;
 786
 787	err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
 788	if (err)
 789		goto out;
 790
 791	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
 792				GFP_KERNEL,
 793				cpu_to_node(cpu));
 794
 795	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
 796
 797	if (unlikely(cpu_group_start == -1)) {
 798		WARN_ON_ONCE(1);
 799		err = -ENODATA;
 800		goto out;
 801	}
 802
 803	for (i = first_thread; i < first_thread + threads_per_core; i++) {
 804		int i_group_start = get_cpu_thread_group_start(i, &tg);
 805
 806		if (unlikely(i_group_start == -1)) {
 807			WARN_ON_ONCE(1);
 808			err = -ENODATA;
 809			goto out;
 810		}
 811
 812		if (i_group_start == cpu_group_start)
 813			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
 814	}
 815
 816out:
 817	of_node_put(dn);
 818	return err;
 819}
 820
 821static int init_big_cores(void)
 822{
 823	int cpu;
 824
 825	for_each_possible_cpu(cpu) {
 826		int err = init_cpu_l1_cache_map(cpu);
 827
 828		if (err)
 829			return err;
 830
 831		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
 832					GFP_KERNEL,
 833					cpu_to_node(cpu));
 834	}
 835
 836	has_big_cores = true;
 837	return 0;
 838}
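    /*
     * Note that has_big_cores is only set once every possible CPU has a
     * parseable "ibm,thread-groups" L1 map; a failure for any CPU makes
     * init_big_cores() bail out and the small-core masks stay unused.
     */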
 839
 840void __init smp_prepare_cpus(unsigned int max_cpus)
 841{
 842	unsigned int cpu;
 843
 844	DBG("smp_prepare_cpus\n");
 845
 846	/*
 847	 * setup_cpu may need to be called on the boot cpu. We haven't
 848	 * spun any cpus up yet, but let's be paranoid.
 849	 */
 850	BUG_ON(boot_cpuid != smp_processor_id());
 851
 852	/* Fixup boot cpu */
 853	smp_store_cpu_info(boot_cpuid);
 854	cpu_callin_map[boot_cpuid] = 1;
 855
 856	for_each_possible_cpu(cpu) {
 857		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
 858					GFP_KERNEL, cpu_to_node(cpu));
 859		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
 860					GFP_KERNEL, cpu_to_node(cpu));
 861		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
 862					GFP_KERNEL, cpu_to_node(cpu));
 863		/*
 864		 * numa_node_id() works after this.
 865		 */
 866		if (cpu_present(cpu)) {
 867			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
 868			set_cpu_numa_mem(cpu,
 869				local_memory_node(numa_cpu_lookup_table[cpu]));
 870		}
 871	}
 872
 873	/* Init the cpumasks so the boot CPU is related to itself */
 874	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
 875	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
 876	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
 877
 878	init_big_cores();
 879	if (has_big_cores) {
 880		cpumask_set_cpu(boot_cpuid,
 881				cpu_smallcore_mask(boot_cpuid));
 882	}
 883
 884	if (smp_ops && smp_ops->probe)
 885		smp_ops->probe();
 886}
 887
 888void smp_prepare_boot_cpu(void)
 889{
 890	BUG_ON(smp_processor_id() != boot_cpuid);
 891#ifdef CONFIG_PPC64
 892	paca_ptrs[boot_cpuid]->__current = current;
 893#endif
 894	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
 895	current_set[boot_cpuid] = current;
 896}
 897
 898#ifdef CONFIG_HOTPLUG_CPU
 899
 900int generic_cpu_disable(void)
 901{
 902	unsigned int cpu = smp_processor_id();
 903
 904	if (cpu == boot_cpuid)
 905		return -EBUSY;
 906
 907	set_cpu_online(cpu, false);
 908#ifdef CONFIG_PPC64
 909	vdso_data->processorCount--;
 910#endif
 911	/* Update affinity of all IRQs previously aimed at this CPU */
 912	irq_migrate_all_off_this_cpu();
 913
 914	/*
 915	 * Depending on the details of the interrupt controller, it's possible
 916	 * that one of the interrupts we just migrated away from this CPU is
 917	 * actually already pending on this CPU. If we leave it in that state
 918	 * the interrupt will never be EOI'ed, and will never fire again. So
 919	 * temporarily enable interrupts here, to allow any pending interrupt to
 920	 * be received (and EOI'ed), before we take this CPU offline.
 921	 */
 922	local_irq_enable();
 923	mdelay(1);
 924	local_irq_disable();
 925
 926	return 0;
 927}
 928
 929void generic_cpu_die(unsigned int cpu)
 930{
 931	int i;
 932
 933	for (i = 0; i < 100; i++) {
 934		smp_rmb();
 935		if (is_cpu_dead(cpu))
 936			return;
 937		msleep(100);
 938	}
 939	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
 940}
 941
 942void generic_set_cpu_dead(unsigned int cpu)
 943{
 944	per_cpu(cpu_state, cpu) = CPU_DEAD;
 945}
 946
 947/*
 948 * cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise it
 949 * stays CPU_DEAD from the previous generic_set_cpu_dead() and the wait
 950 * loop in generic_cpu_die() returns before the CPU has actually gone down.
 951 */
 952void generic_set_cpu_up(unsigned int cpu)
 953{
 954	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 955}
 956
 957int generic_check_cpu_restart(unsigned int cpu)
 958{
 959	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
 960}
 961
 962int is_cpu_dead(unsigned int cpu)
 963{
 964	return per_cpu(cpu_state, cpu) == CPU_DEAD;
 965}
 966
 967static bool secondaries_inhibited(void)
 968{
 969	return kvm_hv_mode_active();
 970}
 971
 972#else /* HOTPLUG_CPU */
 973
 974#define secondaries_inhibited()		0
 975
 976#endif
 977
 978static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
 979{
 980#ifdef CONFIG_PPC64
 981	paca_ptrs[cpu]->__current = idle;
 982	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
 983				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
 984#endif
 985	idle->cpu = cpu;
 986	secondary_current = current_set[cpu] = idle;
 987}
 988
 989int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 990{
 991	int rc, c;
 992
 993	/*
 994	 * Don't allow secondary threads to come online if inhibited
 995	 */
 996	if (threads_per_core > 1 && secondaries_inhibited() &&
 997	    cpu_thread_in_subcore(cpu))
 998		return -EBUSY;
 999
1000	if (smp_ops == NULL ||
1001	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1002		return -EINVAL;
1003
1004	cpu_idle_thread_init(cpu, tidle);
1005
1006	/*
1007	 * The platform might need to allocate resources prior to bringing
1008	 * up the CPU
1009	 */
1010	if (smp_ops->prepare_cpu) {
1011		rc = smp_ops->prepare_cpu(cpu);
1012		if (rc)
1013			return rc;
1014	}
1015
1016	/* Make sure callin-map entry is 0 (can be left over from a
1017	 * previous CPU hotplug).
1018	 */
1019	cpu_callin_map[cpu] = 0;
1020
1021	/* The information for processor bringup must
1022	 * be written out to main store before we release
1023	 * the processor.
1024	 */
1025	smp_mb();
1026
1027	/* wake up cpus */
1028	DBG("smp: kicking cpu %d\n", cpu);
1029	rc = smp_ops->kick_cpu(cpu);
1030	if (rc) {
1031		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1032		return rc;
1033	}
1034
1035	/*
1036	 * Wait to see if the cpu made a callin (is actually up).
1037	 * Use this value that I found through experimentation.
1038	 * -- Cort
1039	 */
1040	if (system_state < SYSTEM_RUNNING)
1041		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
1042			udelay(100);
1043#ifdef CONFIG_HOTPLUG_CPU
1044	else
1045		/*
1046		 * CPUs can take much longer to come up in the
1047		 * hotplug case.  Wait five seconds.
1048		 */
1049		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
1050			msleep(1);
1051#endif
1052
1053	if (!cpu_callin_map[cpu]) {
1054		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1055		return -ENOENT;
1056	}
1057
1058	DBG("Processor %u found.\n", cpu);
1059
1060	if (smp_ops->give_timebase)
1061		smp_ops->give_timebase();
1062
1063	/* Wait until cpu puts itself in the online & active maps */
1064	spin_until_cond(cpu_online(cpu));
1065
1066	return 0;
1067}
1068
1069/* Return the value of the reg property corresponding to the given
1070 * logical cpu.
1071 */
1072int cpu_to_core_id(int cpu)
1073{
1074	struct device_node *np;
1075	const __be32 *reg;
1076	int id = -1;
1077
1078	np = of_get_cpu_node(cpu, NULL);
1079	if (!np)
1080		goto out;
1081
1082	reg = of_get_property(np, "reg", NULL);
1083	if (!reg)
1084		goto out;
1085
1086	id = be32_to_cpup(reg);
1087out:
1088	of_node_put(np);
1089	return id;
1090}
1091EXPORT_SYMBOL_GPL(cpu_to_core_id);
1092
1093/* Helper routines for cpu to core mapping */
1094int cpu_core_index_of_thread(int cpu)
1095{
1096	return cpu >> threads_shift;
1097}
1098EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1099
1100int cpu_first_thread_of_core(int core)
1101{
1102	return core << threads_shift;
1103}
1104EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
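    /*
     * Illustration (assuming an SMT8 core, i.e. threads_shift == 3):
     * cpu_core_index_of_thread(13) == 1 and cpu_first_thread_of_core(1) == 8,
     * so logical CPU 13 is thread 5 of core 1.
     */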
1105
1106/* Must be called when no change can occur to cpu_present_mask,
1107 * i.e. during cpu online or offline.
1108 */
1109static struct device_node *cpu_to_l2cache(int cpu)
1110{
1111	struct device_node *np;
1112	struct device_node *cache;
1113
1114	if (!cpu_present(cpu))
1115		return NULL;
1116
1117	np = of_get_cpu_node(cpu, NULL);
1118	if (np == NULL)
1119		return NULL;
1120
1121	cache = of_find_next_cache_node(np);
1122
1123	of_node_put(np);
1124
1125	return cache;
1126}
1127
1128static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
1129{
1130	struct device_node *l2_cache, *np;
1131	int i;
1132
1133	l2_cache = cpu_to_l2cache(cpu);
1134	if (!l2_cache)
1135		return false;
1136
1137	for_each_cpu(i, cpu_online_mask) {
1138		/*
1139		 * When updating the masks the current CPU has not yet been
1140		 * marked online, but we still need to update its cache masks.
1141		 */
1142		np = cpu_to_l2cache(i);
1143		if (!np)
1144			continue;
1145
1146		if (np == l2_cache)
1147			set_cpus_related(cpu, i, mask_fn);
1148
1149		of_node_put(np);
1150	}
1151	of_node_put(l2_cache);
1152
1153	return true;
1154}
1155
1156#ifdef CONFIG_HOTPLUG_CPU
1157static void remove_cpu_from_masks(int cpu)
1158{
1159	int i;
1160
1161	/* NB: cpu_core_mask is a superset of the others */
1162	for_each_cpu(i, cpu_core_mask(cpu)) {
1163		set_cpus_unrelated(cpu, i, cpu_core_mask);
1164		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1165		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1166		if (has_big_cores)
1167			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1168	}
1169}
1170#endif
1171
1172static inline void add_cpu_to_smallcore_masks(int cpu)
1173{
1174	struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
1175	int i, first_thread = cpu_first_thread_sibling(cpu);
1176
1177	if (!has_big_cores)
1178		return;
1179
1180	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1181
1182	for (i = first_thread; i < first_thread + threads_per_core; i++) {
1183		if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
1184			set_cpus_related(i, cpu, cpu_smallcore_mask);
1185	}
1186}
1187
1187
1188static void add_cpu_to_masks(int cpu)
1189{
1190	int first_thread = cpu_first_thread_sibling(cpu);
1191	int chipid = cpu_to_chip_id(cpu);
1192	int i;
1193
1194	/*
1195	 * This CPU will not be in the online mask yet so we need to manually
1196	 * add it to its own thread sibling mask.
1197	 */
1198	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1199
1200	for (i = first_thread; i < first_thread + threads_per_core; i++)
1201		if (cpu_online(i))
1202			set_cpus_related(i, cpu, cpu_sibling_mask);
1203
1204	add_cpu_to_smallcore_masks(cpu);
1205	/*
1206	 * Copy the thread sibling mask into the cache sibling mask
1207	 * and mark any CPUs that share an L2 with this CPU.
1208	 */
1209	for_each_cpu(i, cpu_sibling_mask(cpu))
1210		set_cpus_related(cpu, i, cpu_l2_cache_mask);
1211	update_mask_by_l2(cpu, cpu_l2_cache_mask);
1212
1213	/*
1214	 * Copy the cache sibling mask into the core sibling mask and mark
1215	 * any CPUs on the same chip as this CPU.
1216	 */
1217	for_each_cpu(i, cpu_l2_cache_mask(cpu))
1218		set_cpus_related(cpu, i, cpu_core_mask);
1219
1220	if (chipid == -1)
1221		return;
1222
1223	for_each_cpu(i, cpu_online_mask)
1224		if (cpu_to_chip_id(i) == chipid)
1225			set_cpus_related(cpu, i, cpu_core_mask);
1226}
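    /*
     * After this the masks nest: cpu_sibling_mask is a subset of
     * cpu_l2_cache_mask, which is a subset of cpu_core_mask.
     * remove_cpu_from_masks() above relies on that nesting when it walks
     * cpu_core_mask to undo the other relations.
     */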
1227
1228static bool shared_caches;
1229
1230/* Activate a secondary processor. */
1231void start_secondary(void *unused)
1232{
1233	unsigned int cpu = smp_processor_id();
1234	struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1235
1236	mmgrab(&init_mm);
1237	current->active_mm = &init_mm;
1238
1239	smp_store_cpu_info(cpu);
1240	set_dec(tb_ticks_per_jiffy);
1241	preempt_disable();
1242	cpu_callin_map[cpu] = 1;
1243
1244	if (smp_ops->setup_cpu)
1245		smp_ops->setup_cpu(cpu);
1246	if (smp_ops->take_timebase)
1247		smp_ops->take_timebase();
1248
1249	secondary_cpu_time_init();
1250
1251#ifdef CONFIG_PPC64
1252	if (system_state == SYSTEM_RUNNING)
1253		vdso_data->processorCount++;
1254
1255	vdso_getcpu_init();
1256#endif
1257	/* Update topology CPU masks */
1258	add_cpu_to_masks(cpu);
1259
1260	if (has_big_cores)
1261		sibling_mask = cpu_smallcore_mask;
1262	/*
1263	 * Check for any shared caches. Note that this must be done on a
1264	 * per-core basis because one core in the pair might be disabled.
1265	 */
1266	if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
1267		shared_caches = true;
1268
1269	set_numa_node(numa_cpu_lookup_table[cpu]);
1270	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1271
1272	smp_wmb();
1273	notify_cpu_starting(cpu);
1274	set_cpu_online(cpu, true);
1275
1276	boot_init_stack_canary();
1277
1278	local_irq_enable();
1279
1280	/* We can enable ftrace for secondary cpus now */
1281	this_cpu_enable_ftrace();
1282
1283	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1284
1285	BUG();
1286}
1287
1288int setup_profiling_timer(unsigned int multiplier)
1289{
1290	return 0;
1291}
1292
1293#ifdef CONFIG_SCHED_SMT
1294/* cpumask of CPUs with asymmetric SMT dependency */
1295static int powerpc_smt_flags(void)
1296{
1297	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
1298
1299	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
1300		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
1301		flags |= SD_ASYM_PACKING;
1302	}
1303	return flags;
1304}
1305#endif
1306
1307static struct sched_domain_topology_level powerpc_topology[] = {
1308#ifdef CONFIG_SCHED_SMT
1309	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1310#endif
1311	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1312	{ NULL, },
1313};
1314
1315/*
1316 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
1317 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
1318 * since the migrated task remains cache hot. We want to take advantage of this
1319 * at the scheduler level so an extra topology level is required.
1320 */
1321static int powerpc_shared_cache_flags(void)
1322{
1323	return SD_SHARE_PKG_RESOURCES;
1324}
1325
1326/*
1327 * We can't just pass cpu_l2_cache_mask() directly because
1328 * it returns a non-const pointer and the compiler barfs on that.
1329 */
1330static const struct cpumask *shared_cache_mask(int cpu)
1331{
1332	return cpu_l2_cache_mask(cpu);
1333}
1334
1335#ifdef CONFIG_SCHED_SMT
1336static const struct cpumask *smallcore_smt_mask(int cpu)
1337{
1338	return cpu_smallcore_mask(cpu);
1339}
1340#endif
1341
1342static struct sched_domain_topology_level power9_topology[] = {
1343#ifdef CONFIG_SCHED_SMT
1344	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1345#endif
1346	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
1347	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1348	{ NULL, },
1349};
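    /*
     * With the table above the scheduler builds SMT -> CACHE -> DIE domains,
     * the CACHE level grouping CPUs that share an L2 via shared_cache_mask().
     * smp_cpus_done() below only selects this table when shared_caches has
     * been detected.
     */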
1350
1351void __init smp_cpus_done(unsigned int max_cpus)
1352{
1353	/*
1354	 * We are running pinned to the boot CPU, see rest_init().
1355	 */
1356	if (smp_ops && smp_ops->setup_cpu)
1357		smp_ops->setup_cpu(boot_cpuid);
1358
1359	if (smp_ops && smp_ops->bringup_done)
1360		smp_ops->bringup_done();
1361
1362	/*
1363	 * On a shared LPAR, associativity needs to be requested.
1364	 * Hence, get the NUMA topology before dumping the CPU topology.
1365	 */
1366	shared_proc_topology_init();
1367	dump_numa_cpu_topology();
1368
1369#ifdef CONFIG_SCHED_SMT
1370	if (has_big_cores) {
1371		pr_info("Using small cores at SMT level\n");
1372		power9_topology[0].mask = smallcore_smt_mask;
1373		powerpc_topology[0].mask = smallcore_smt_mask;
1374	}
1375#endif
1376	/*
1377	 * If any CPU detects that it's sharing a cache with another CPU then
1378	 * use the deeper topology that is aware of this sharing.
1379	 */
1380	if (shared_caches) {
1381		pr_info("Using shared cache scheduler topology\n");
1382		set_sched_topology(power9_topology);
1383	} else {
1384		pr_info("Using standard scheduler topology\n");
1385		set_sched_topology(powerpc_topology);
1386	}
1387}
1388
1389#ifdef CONFIG_HOTPLUG_CPU
1390int __cpu_disable(void)
1391{
1392	int cpu = smp_processor_id();
1393	int err;
1394
1395	if (!smp_ops->cpu_disable)
1396		return -ENOSYS;
1397
1398	this_cpu_disable_ftrace();
1399
1400	err = smp_ops->cpu_disable();
1401	if (err)
1402		return err;
1403
1404	/* Update sibling maps */
1405	remove_cpu_from_masks(cpu);
1406
1407	return 0;
1408}
1409
1410void __cpu_die(unsigned int cpu)
1411{
1412	if (smp_ops->cpu_die)
1413		smp_ops->cpu_die(cpu);
1414}
1415
1416void cpu_die(void)
1417{
1418	/*
1419	 * Disable ftrace on the down path. It will be re-enabled by
1420	 * start_secondary() via start_secondary_resume() below.
1421	 */
1422	this_cpu_disable_ftrace();
1423
1424	if (ppc_md.cpu_die)
1425		ppc_md.cpu_die();
1426
1427	/* If we return, we re-enter start_secondary */
1428	start_secondary_resume();
1429}
1430
1431#endif