v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3 *	x86 SMP booting functions
   4 *
   5 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
   6 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
   7 *	Copyright 2001 Andi Kleen, SuSE Labs.
   8 *
   9 *	Much of the core SMP work is based on previous work by Thomas Radke, to
  10 *	whom a great many thanks are extended.
  11 *
  12 *	Thanks to Intel for making available several different Pentium,
  13 *	Pentium Pro and Pentium-II/Xeon MP machines.
  14 *	Original development of Linux SMP code supported by Caldera.
  15 *
  16 *	Fixes
  17 *		Felix Koop	:	NR_CPUS used properly
  18 *		Jose Renau	:	Handle single CPU case.
  19 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
  20 *		Greg Wright	:	Fix for kernel stacks panic.
  21 *		Erich Boleyn	:	MP v1.4 and additional changes.
  22 *	Matthias Sattler	:	Changes for 2.1 kernel map.
  23 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
  24 *	Michael Chastain	:	Change trampoline.S to gnu as.
  25 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
  26 *		Ingo Molnar	:	Added APIC timers, based on code
  27 *					from Jose Renau
  28 *		Ingo Molnar	:	various cleanups and rewrites
  29 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
  30 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
  31 *	Andi Kleen		:	Changed for SMP boot into long mode.
  32 *		Martin J. Bligh	: 	Added support for multi-quad systems
  33 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
  34 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
  35 *      Andi Kleen              :       Converted to new state machine.
  36 *	Ashok Raj		: 	CPU hotplug support
  37 *	Glauber Costa		:	i386 and x86_64 integration
  38 */
  39
  40#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  41
  42#include <linux/init.h>
  43#include <linux/smp.h>
  44#include <linux/export.h>
  45#include <linux/sched.h>
  46#include <linux/sched/topology.h>
  47#include <linux/sched/hotplug.h>
  48#include <linux/sched/task_stack.h>
  49#include <linux/percpu.h>
  50#include <linux/memblock.h>
  51#include <linux/err.h>
  52#include <linux/nmi.h>
  53#include <linux/tboot.h>
  54#include <linux/gfp.h>
  55#include <linux/cpuidle.h>
  56#include <linux/kexec.h>
  57#include <linux/numa.h>
  58#include <linux/pgtable.h>
  59#include <linux/overflow.h>
  60#include <linux/stackprotector.h>
  61#include <linux/cpuhotplug.h>
  62#include <linux/mc146818rtc.h>
  63
  64#include <asm/acpi.h>
  65#include <asm/cacheinfo.h>
  66#include <asm/desc.h>
  67#include <asm/nmi.h>
  68#include <asm/irq.h>
  69#include <asm/realmode.h>
  70#include <asm/cpu.h>
  71#include <asm/numa.h>
  72#include <asm/tlbflush.h>
  73#include <asm/mtrr.h>
  74#include <asm/mwait.h>
  75#include <asm/apic.h>
  76#include <asm/io_apic.h>
  77#include <asm/fpu/api.h>
  78#include <asm/setup.h>
  79#include <asm/uv/uv.h>
  80#include <asm/microcode.h>
  81#include <asm/i8259.h>
  82#include <asm/misc.h>
  83#include <asm/qspinlock.h>
  84#include <asm/intel-family.h>
  85#include <asm/cpu_device_id.h>
  86#include <asm/spec-ctrl.h>
  87#include <asm/hw_irq.h>
  88#include <asm/stackprotector.h>
  89#include <asm/sev.h>
  91
  92/* representing HT siblings of each logical CPU */
  93DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
  94EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  95
  96/* representing HT and core siblings of each logical CPU */
  97DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
  98EXPORT_PER_CPU_SYMBOL(cpu_core_map);
  99
 100/* representing HT, core, and die siblings of each logical CPU */
 101DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
 102EXPORT_PER_CPU_SYMBOL(cpu_die_map);
 103
 104/* Per CPU bogomips and other parameters */
 105DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 106EXPORT_PER_CPU_SYMBOL(cpu_info);
 107
 108/* CPUs which are the primary SMT threads */
 109struct cpumask __cpu_primary_thread_mask __read_mostly;
 110
 111/* Representing CPUs for which sibling maps can be computed */
 112static cpumask_var_t cpu_sibling_setup_mask;
 113
 114struct mwait_cpu_dead {
 115	unsigned int	control;
 116	unsigned int	status;
 117};
 118
 119#define CPUDEAD_MWAIT_WAIT	0xDEADBEEF
 120#define CPUDEAD_MWAIT_KEXEC_HLT	0x4A17DEAD
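/*
 * Arbitrary magic values used as a handshake between mwait_play_dead()
 * and smp_kick_mwait_play_dead() across kexec.
 */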
 121
 122/*
 123 * Cache line aligned data for mwait_play_dead(). Separate on purpose so
 124 * that it's unlikely to be touched by other CPUs.
 125 */
 126static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead);
 127
 128/* Logical package management. */
 129struct logical_maps {
 130	u32	phys_pkg_id;
 131	u32	phys_die_id;
 132	u32	logical_pkg_id;
 133	u32	logical_die_id;
 134};
 135
 136/* Temporary workaround until the full topology mechanics is in place */
 137static DEFINE_PER_CPU_READ_MOSTLY(struct logical_maps, logical_maps) = {
 138	.phys_pkg_id	= U32_MAX,
 139	.phys_die_id	= U32_MAX,
 140};
 141
 142unsigned int __max_logical_packages __read_mostly;
 143EXPORT_SYMBOL(__max_logical_packages);
 144static unsigned int logical_packages __read_mostly;
 145static unsigned int logical_die __read_mostly;
 146
 147/* Maximum number of SMT threads on any online core */
 148int __read_mostly __max_smt_threads = 1;
 149
 150/* Flag to indicate if a complete sched domain rebuild is required */
 151bool x86_topology_update;
 152
 153int arch_update_cpu_topology(void)
 154{
 155	int retval = x86_topology_update;
 156
 157	x86_topology_update = false;
 158	return retval;
 159}
 160
 161static unsigned int smpboot_warm_reset_vector_count;
 162
 163static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 164{
 165	unsigned long flags;
 166
 167	spin_lock_irqsave(&rtc_lock, flags);
 168	if (!smpboot_warm_reset_vector_count++) {
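		/*
		 * Shutdown status byte 0x0A in CMOS register 0x0F makes the BIOS
		 * jump through the warm-reset vector at 40:67 after an INIT, so
		 * store the trampoline entry point there as segment:offset.
		 */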
 169		CMOS_WRITE(0xa, 0xf);
 170		*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = start_eip >> 4;
 171		*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = start_eip & 0xf;
 172	}
 173	spin_unlock_irqrestore(&rtc_lock, flags);
 174}
 175
 176static inline void smpboot_restore_warm_reset_vector(void)
 177{
 178	unsigned long flags;
 179
 180	/*
 181	 * Paranoid:  Set warm reset code and vector here back
 182	 * to default values.
 183	 */
 184	spin_lock_irqsave(&rtc_lock, flags);
 185	if (!--smpboot_warm_reset_vector_count) {
 186		CMOS_WRITE(0, 0xf);
 187		*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
 188	}
 189	spin_unlock_irqrestore(&rtc_lock, flags);
 190
 191}
 192
 193/* Run the next set of setup steps for the upcoming CPU */
 194static void ap_starting(void)
 195{
 196	int cpuid = smp_processor_id();
 197
  198	/* Mop up any mwait_play_dead() wreckage */
 199	this_cpu_write(mwait_cpu_dead.status, 0);
 200	this_cpu_write(mwait_cpu_dead.control, 0);
 201
 202	/*
 203	 * If woken up by an INIT in an 82489DX configuration the alive
 204	 * synchronization guarantees that the CPU does not reach this
 205	 * point before an INIT_deassert IPI reaches the local APIC, so it
 206	 * is now safe to touch the local APIC.
 207	 *
 208	 * Set up this CPU, first the APIC, which is probably redundant on
 209	 * most boards.
 210	 */
 211	apic_ap_setup();
 212
 213	/* Save the processor parameters. */
 214	smp_store_cpu_info(cpuid);
 215
 216	/*
 217	 * The topology information must be up to date before
 218	 * notify_cpu_starting().
 219	 */
 220	set_cpu_sibling_map(cpuid);
 221
 222	ap_init_aperfmperf();
 223
 224	pr_debug("Stack at about %p\n", &cpuid);
 225
 226	wmb();
 227
 228	/*
 229	 * This runs the AP through all the cpuhp states to its target
 230	 * state CPUHP_ONLINE.
 231	 */
 232	notify_cpu_starting(cpuid);
 233}
 234
 235static void ap_calibrate_delay(void)
 236{
 237	/*
 238	 * Calibrate the delay loop and update loops_per_jiffy in cpu_data.
 239	 * smp_store_cpu_info() stored a value that is close but not as
 240	 * accurate as the value just calculated.
 241	 *
 242	 * As this is invoked after the TSC synchronization check,
 243	 * calibrate_delay_is_known() will skip the calibration routine
 244	 * when TSC is synchronized across sockets.
 245	 */
 246	calibrate_delay();
 247	cpu_data(smp_processor_id()).loops_per_jiffy = loops_per_jiffy;
 248}
 249
 250/*
 251 * Activate a secondary processor.
 252 */
 253static void notrace start_secondary(void *unused)
 254{
 255	/*
 256	 * Don't put *anything* except direct CPU state initialization
  257	 * before cpu_init(). SMP booting is fragile enough that we want to
  258	 * limit the things done here to the bare minimum.
 259	 */
 260	cr4_init();
 261
 262	/*
 263	 * 32-bit specific. 64-bit reaches this code with the correct page
 264	 * table established. Yet another historical divergence.
 265	 */
 266	if (IS_ENABLED(CONFIG_X86_32)) {
 267		/* switch away from the initial page table */
 268		load_cr3(swapper_pg_dir);
 269		__flush_tlb_all();
 270	}
 271
 272	cpu_init_exception_handling();
 273
 274	/*
 275	 * Load the microcode before reaching the AP alive synchronization
 276	 * point below so it is not part of the full per CPU serialized
 277	 * bringup part when "parallel" bringup is enabled.
 278	 *
 279	 * That's even safe when hyperthreading is enabled in the CPU as
 280	 * the core code starts the primary threads first and leaves the
 281	 * secondary threads waiting for SIPI. Loading microcode on
 282	 * physical cores concurrently is a safe operation.
 283	 *
 284	 * This covers both the Intel specific issue that concurrent
 285	 * microcode loading on SMT siblings must be prohibited and the
  286	 * vendor independent issue that microcode loading which changes
 287	 * CPUID, MSRs etc. must be strictly serialized to maintain
 288	 * software state correctness.
 289	 */
 290	load_ucode_ap();
 291
 292	/*
  293	 * Synchronization point with the hotplug core. Sets this CPU's
 294	 * synchronization state to ALIVE and spin-waits for the control CPU to
 295	 * release this CPU for further bringup.
 296	 */
 297	cpuhp_ap_sync_alive();
 298
 299	cpu_init();
 300	fpu__init_cpu();
 301	rcutree_report_cpu_starting(raw_smp_processor_id());
 302	x86_cpuinit.early_percpu_clock_init();
 303
 304	ap_starting();
 305
 306	/* Check TSC synchronization with the control CPU. */
 307	check_tsc_sync_target();
 308
 309	/*
 310	 * Calibrate the delay loop after the TSC synchronization check.
  311	 * This allows skipping the calibration when TSC is synchronized
 312	 * across sockets.
 313	 */
 314	ap_calibrate_delay();
 315
 316	speculative_store_bypass_ht_init();
 317
 318	/*
 319	 * Lock vector_lock, set CPU online and bring the vector
 320	 * allocator online. Online must be set with vector_lock held
 321	 * to prevent a concurrent irq setup/teardown from seeing a
 322	 * half valid vector space.
 323	 */
 324	lock_vector_lock();
 325	set_cpu_online(smp_processor_id(), true);
 326	lapic_online();
 327	unlock_vector_lock();
 328	x86_platform.nmi_init();
 329
 330	/* enable local interrupts */
 331	local_irq_enable();
 332
 333	x86_cpuinit.setup_percpu_clockev();
 334
 335	wmb();
 336	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 337}
 338
 339/**
  340 * topology_phys_to_logical_pkg - Map a physical package id to a logical package id
 341 * @phys_pkg:	The physical package id to map
 342 *
 343 * Returns logical package id or -1 if not found
 344 */
 345int topology_phys_to_logical_pkg(unsigned int phys_pkg)
 346{
 347	int cpu;
 348
 349	for_each_possible_cpu(cpu) {
 350		if (per_cpu(logical_maps.phys_pkg_id, cpu) == phys_pkg)
 351			return per_cpu(logical_maps.logical_pkg_id, cpu);
 352	}
 353	return -1;
 354}
 355EXPORT_SYMBOL(topology_phys_to_logical_pkg);
 356
 357/**
  358 * topology_phys_to_logical_die - Map a physical die id to a logical die id
 359 * @die_id:	The physical die id to map
 360 * @cur_cpu:	The CPU for which the mapping is done
 361 *
 362 * Returns logical die id or -1 if not found
 363 */
 364static int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu)
 365{
 366	int cpu, proc_id = cpu_data(cur_cpu).topo.pkg_id;
 367
 368	for_each_possible_cpu(cpu) {
 369		if (per_cpu(logical_maps.phys_pkg_id, cpu) == proc_id &&
 370		    per_cpu(logical_maps.phys_die_id, cpu) == die_id)
 371			return per_cpu(logical_maps.logical_die_id, cpu);
 372	}
 373	return -1;
 374}
 375
 376/**
 377 * topology_update_package_map - Update the physical to logical package map
 378 * @pkg:	The physical package id as retrieved via CPUID
 379 * @cpu:	The cpu for which this is updated
 380 */
 381int topology_update_package_map(unsigned int pkg, unsigned int cpu)
 382{
 383	int new;
 384
 385	/* Already available somewhere? */
 386	new = topology_phys_to_logical_pkg(pkg);
 387	if (new >= 0)
 388		goto found;
 389
 390	new = logical_packages++;
 391	if (new != pkg) {
 392		pr_info("CPU %u Converting physical %u to logical package %u\n",
 393			cpu, pkg, new);
 394	}
 395found:
 396	per_cpu(logical_maps.phys_pkg_id, cpu) = pkg;
 397	per_cpu(logical_maps.logical_pkg_id, cpu) = new;
 398	cpu_data(cpu).topo.logical_pkg_id = new;
 399	return 0;
 400}
 401/**
 402 * topology_update_die_map - Update the physical to logical die map
 403 * @die:	The die id as retrieved via CPUID
 404 * @cpu:	The cpu for which this is updated
 405 */
 406int topology_update_die_map(unsigned int die, unsigned int cpu)
 407{
 408	int new;
 409
 410	/* Already available somewhere? */
 411	new = topology_phys_to_logical_die(die, cpu);
 412	if (new >= 0)
 413		goto found;
 414
 415	new = logical_die++;
 416	if (new != die) {
 417		pr_info("CPU %u Converting physical %u to logical die %u\n",
 418			cpu, die, new);
 419	}
 420found:
 421	per_cpu(logical_maps.phys_die_id, cpu) = die;
 422	per_cpu(logical_maps.logical_die_id, cpu) = new;
 423	cpu_data(cpu).topo.logical_die_id = new;
 424	return 0;
 425}
 426
 427static void __init smp_store_boot_cpu_info(void)
 428{
 429	int id = 0; /* CPU 0 */
 430	struct cpuinfo_x86 *c = &cpu_data(id);
 431
 432	*c = boot_cpu_data;
 433	c->cpu_index = id;
 434	topology_update_package_map(c->topo.pkg_id, id);
 435	topology_update_die_map(c->topo.die_id, id);
 436	c->initialized = true;
 437}
 438
 439/*
 440 * The bootstrap kernel entry code has set these up. Save them for
 441 * a given CPU
 442 */
 443void smp_store_cpu_info(int id)
 444{
 445	struct cpuinfo_x86 *c = &cpu_data(id);
 446
 447	/* Copy boot_cpu_data only on the first bringup */
 448	if (!c->initialized)
 449		*c = boot_cpu_data;
 450	c->cpu_index = id;
 451	/*
 452	 * During boot time, CPU0 has this setup already. Save the info when
 453	 * bringing up an AP.
 454	 */
 455	identify_secondary_cpu(c);
 456	c->initialized = true;
 457}
 458
 459static bool
 460topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 461{
 462	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 463
 464	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
 465}
 466
 467static bool
 468topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
 469{
 470	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 471
 472	return !WARN_ONCE(!topology_same_node(c, o),
 473		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
 474		"[node: %d != %d]. Ignoring dependency.\n",
 475		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
 476}
 477
 478#define link_mask(mfunc, c1, c2)					\
 479do {									\
 480	cpumask_set_cpu((c1), mfunc(c2));				\
 481	cpumask_set_cpu((c2), mfunc(c1));				\
 482} while (0)
 483
 484static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 485{
 486	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 487		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 488
 489		if (c->topo.pkg_id == o->topo.pkg_id &&
 490		    c->topo.die_id == o->topo.die_id &&
 491		    per_cpu_llc_id(cpu1) == per_cpu_llc_id(cpu2)) {
 492			if (c->topo.core_id == o->topo.core_id)
 493				return topology_sane(c, o, "smt");
 494
 495			if ((c->topo.cu_id != 0xff) &&
 496			    (o->topo.cu_id != 0xff) &&
 497			    (c->topo.cu_id == o->topo.cu_id))
 498				return topology_sane(c, o, "smt");
 499		}
 500
 501	} else if (c->topo.pkg_id == o->topo.pkg_id &&
 502		   c->topo.die_id == o->topo.die_id &&
 503		   c->topo.core_id == o->topo.core_id) {
 504		return topology_sane(c, o, "smt");
 505	}
 506
 507	return false;
 508}
 509
 510static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 511{
 512	if (c->topo.pkg_id == o->topo.pkg_id &&
 513	    c->topo.die_id == o->topo.die_id)
 514		return true;
 515	return false;
 516}
 517
 518static bool match_l2c(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 519{
 520	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 521
 522	/* If the arch didn't set up l2c_id, fall back to SMT */
 523	if (per_cpu_l2c_id(cpu1) == BAD_APICID)
 524		return match_smt(c, o);
 525
 526	/* Do not match if L2 cache id does not match: */
 527	if (per_cpu_l2c_id(cpu1) != per_cpu_l2c_id(cpu2))
 528		return false;
 529
 530	return topology_sane(c, o, "l2c");
 531}
 532
 533/*
 534 * Unlike the other levels, we do not enforce keeping a
 535 * multicore group inside a NUMA node.  If this happens, we will
 536 * discard the MC level of the topology later.
 537 */
 538static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 539{
 540	if (c->topo.pkg_id == o->topo.pkg_id)
 541		return true;
 542	return false;
 543}
 544
 545/*
 546 * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
 547 *
 548 * Any Intel CPU that has multiple nodes per package and does not
 549 * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
 550 *
 551 * When in SNC mode, these CPUs enumerate an LLC that is shared
 552 * by multiple NUMA nodes. The LLC is shared for off-package data
 553 * access but private to the NUMA node (half of the package) for
 554 * on-package access. CPUID (the source of the information about
 555 * the LLC) can only enumerate the cache as shared or unshared,
 556 * but not this particular configuration.
 557 */
 558
 559static const struct x86_cpu_id intel_cod_cpu[] = {
 560	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0),	/* COD */
 561	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0),	/* COD */
 562	X86_MATCH_INTEL_FAM6_MODEL(ANY, 1),		/* SNC */
 563	{}
 564};
 565
 566static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 567{
 568	const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
 569	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 570	bool intel_snc = id && id->driver_data;
 571
 572	/* Do not match if we do not have a valid APICID for cpu: */
 573	if (per_cpu_llc_id(cpu1) == BAD_APICID)
 574		return false;
 575
 576	/* Do not match if LLC id does not match: */
 577	if (per_cpu_llc_id(cpu1) != per_cpu_llc_id(cpu2))
 578		return false;
 579
 580	/*
 581	 * Allow the SNC topology without warning. Return of false
 582	 * means 'c' does not share the LLC of 'o'. This will be
 583	 * reflected to userspace.
 584	 */
 585	if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
 586		return false;
 587
 588	return topology_sane(c, o, "llc");
 589}
 590
 591
 592static inline int x86_sched_itmt_flags(void)
 593{
 594	return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
 595}
 596
 597#ifdef CONFIG_SCHED_MC
 598static int x86_core_flags(void)
 599{
 600	return cpu_core_flags() | x86_sched_itmt_flags();
 601}
 602#endif
 603#ifdef CONFIG_SCHED_SMT
 604static int x86_smt_flags(void)
 605{
 606	return cpu_smt_flags();
 607}
 608#endif
 609#ifdef CONFIG_SCHED_CLUSTER
 610static int x86_cluster_flags(void)
 611{
 612	return cpu_cluster_flags() | x86_sched_itmt_flags();
 613}
 614#endif
 615
 616static int x86_die_flags(void)
 617{
 618	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
 619	       return x86_sched_itmt_flags();
 620
 621	return 0;
 622}
 623
 624/*
 625 * Set if a package/die has multiple NUMA nodes inside.
 626 * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
 627 * Sub-NUMA Clustering have this.
 628 */
 629static bool x86_has_numa_in_package;
 630
 631static struct sched_domain_topology_level x86_topology[6];
 632
 633static void __init build_sched_topology(void)
 634{
 635	int i = 0;
 636
 637#ifdef CONFIG_SCHED_SMT
 638	x86_topology[i++] = (struct sched_domain_topology_level){
 639		cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT)
 640	};
 641#endif
 642#ifdef CONFIG_SCHED_CLUSTER
 643	x86_topology[i++] = (struct sched_domain_topology_level){
 644		cpu_clustergroup_mask, x86_cluster_flags, SD_INIT_NAME(CLS)
 645	};
 646#endif
 647#ifdef CONFIG_SCHED_MC
 648	x86_topology[i++] = (struct sched_domain_topology_level){
 649		cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC)
 650	};
 651#endif
 652	/*
 653	 * When there is NUMA topology inside the package skip the PKG domain
 654	 * since the NUMA domains will auto-magically create the right spanning
 655	 * domains based on the SLIT.
 656	 */
 657	if (!x86_has_numa_in_package) {
 658		x86_topology[i++] = (struct sched_domain_topology_level){
 659			cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(PKG)
 660		};
 661	}
 662
 663	/*
 664	 * There must be one trailing NULL entry left.
 665	 */
 666	BUG_ON(i >= ARRAY_SIZE(x86_topology)-1);
 667
 668	set_sched_topology(x86_topology);
 669}
 670
 671void set_cpu_sibling_map(int cpu)
 672{
 673	bool has_smt = smp_num_siblings > 1;
 674	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
 675	struct cpuinfo_x86 *c = &cpu_data(cpu);
 676	struct cpuinfo_x86 *o;
 677	int i, threads;
 678
 679	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 680
 681	if (!has_mp) {
 682		cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
 683		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 684		cpumask_set_cpu(cpu, cpu_l2c_shared_mask(cpu));
 685		cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
 686		cpumask_set_cpu(cpu, topology_die_cpumask(cpu));
 687		c->booted_cores = 1;
 688		return;
 689	}
 690
 691	for_each_cpu(i, cpu_sibling_setup_mask) {
 692		o = &cpu_data(i);
 693
 694		if (match_pkg(c, o) && !topology_same_node(c, o))
 695			x86_has_numa_in_package = true;
 696
 697		if ((i == cpu) || (has_smt && match_smt(c, o)))
 698			link_mask(topology_sibling_cpumask, cpu, i);
 699
 700		if ((i == cpu) || (has_mp && match_llc(c, o)))
 701			link_mask(cpu_llc_shared_mask, cpu, i);
 702
 703		if ((i == cpu) || (has_mp && match_l2c(c, o)))
 704			link_mask(cpu_l2c_shared_mask, cpu, i);
 705
 706		if ((i == cpu) || (has_mp && match_die(c, o)))
 707			link_mask(topology_die_cpumask, cpu, i);
 708	}
 709
 710	threads = cpumask_weight(topology_sibling_cpumask(cpu));
 711	if (threads > __max_smt_threads)
 712		__max_smt_threads = threads;
 713
 714	for_each_cpu(i, topology_sibling_cpumask(cpu))
 715		cpu_data(i).smt_active = threads > 1;
 716
 717	/*
 718	 * This needs a separate iteration over the cpus because we rely on all
 719	 * topology_sibling_cpumask links to be set-up.
 720	 */
 721	for_each_cpu(i, cpu_sibling_setup_mask) {
 722		o = &cpu_data(i);
 723
 724		if ((i == cpu) || (has_mp && match_pkg(c, o))) {
 725			link_mask(topology_core_cpumask, cpu, i);
 726
 727			/*
 728			 *  Does this new cpu bringup a new core?
 729			 */
 730			if (threads == 1) {
 731				/*
 732				 * for each core in package, increment
 733				 * the booted_cores for this new cpu
 734				 */
 735				if (cpumask_first(
 736				    topology_sibling_cpumask(i)) == i)
 737					c->booted_cores++;
 738				/*
 739				 * increment the core count for all
 740				 * the other cpus in this package
 741				 */
 742				if (i != cpu)
 743					cpu_data(i).booted_cores++;
 744			} else if (i != cpu && !c->booted_cores)
 745				c->booted_cores = cpu_data(i).booted_cores;
 746		}
 747	}
 748}
 749
 750/* maps the cpu to the sched domain representing multi-core */
 751const struct cpumask *cpu_coregroup_mask(int cpu)
 752{
 753	return cpu_llc_shared_mask(cpu);
 754}
 755
 756const struct cpumask *cpu_clustergroup_mask(int cpu)
 757{
 758	return cpu_l2c_shared_mask(cpu);
 759}
 760EXPORT_SYMBOL_GPL(cpu_clustergroup_mask);
 761
 762static void impress_friends(void)
 763{
 764	int cpu;
 765	unsigned long bogosum = 0;
 766	/*
 767	 * Allow the user to impress friends.
 768	 */
 769	pr_debug("Before bogomips\n");
 770	for_each_online_cpu(cpu)
 771		bogosum += cpu_data(cpu).loops_per_jiffy;
 772
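	/*
	 * loops_per_jiffy counts delay-loop iterations per timer tick, so
	 * dividing the sum by (500000/HZ) yields whole BogoMIPS and the
	 * second term supplies the two decimal places.
	 */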
 773	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
 774		num_online_cpus(),
 775		bogosum/(500000/HZ),
 776		(bogosum/(5000/HZ))%100);
 777
 778	pr_debug("Before bogocount - setting activated=1\n");
 779}
 780
 781/*
 782 * The Multiprocessor Specification 1.4 (1997) example code suggests
 783 * that there should be a 10ms delay between the BSP asserting INIT
 784 * and de-asserting INIT, when starting a remote processor.
 785 * But that slows boot and resume on modern processors, which include
 786 * many cores and don't require that delay.
 787 *
  788 * Cmdline "cpu_init_udelay=" is available to override this delay.
 789 * Modern processor families are quirked to remove the delay entirely.
 790 */
 791#define UDELAY_10MS_DEFAULT 10000
 792
 793static unsigned int init_udelay = UINT_MAX;
 794
 795static int __init cpu_init_udelay(char *str)
 796{
 797	get_option(&str, &init_udelay);
 798
 799	return 0;
 800}
 801early_param("cpu_init_udelay", cpu_init_udelay);
 802
 803static void __init smp_quirk_init_udelay(void)
 804{
 805	/* if cmdline changed it from default, leave it alone */
 806	if (init_udelay != UINT_MAX)
 807		return;
 808
 809	/* if modern processor, use no delay */
 810	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
 811	    ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
 812	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
 813		init_udelay = 0;
 814		return;
 815	}
 816	/* else, use legacy delay */
 817	init_udelay = UDELAY_10MS_DEFAULT;
 818}
 819
 820/*
  821 * Send the INIT assert/de-assert portion of the INIT, INIT, STARTUP sequence.
 822 */
 823static void send_init_sequence(u32 phys_apicid)
 824{
 825	int maxlvt = lapic_get_maxlvt();
 826
 827	/* Be paranoid about clearing APIC errors. */
 828	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
 829		/* Due to the Pentium erratum 3AP.  */
 830		if (maxlvt > 3)
 831			apic_write(APIC_ESR, 0);
 832		apic_read(APIC_ESR);
 833	}
 834
 835	/* Assert INIT on the target CPU */
 836	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT, phys_apicid);
 837	safe_apic_wait_icr_idle();
 838
 839	udelay(init_udelay);
 840
 841	/* Deassert INIT on the target CPU */
 842	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
 843	safe_apic_wait_icr_idle();
 844}
 845
 846/*
 847 * Wake up AP by INIT, INIT, STARTUP sequence.
 848 */
 849static int wakeup_secondary_cpu_via_init(u32 phys_apicid, unsigned long start_eip)
 850{
 851	unsigned long send_status = 0, accept_status = 0;
 852	int num_starts, j, maxlvt;
 853
 854	preempt_disable();
 855	maxlvt = lapic_get_maxlvt();
 856	send_init_sequence(phys_apicid);
 857
 858	mb();
 859
 860	/*
 861	 * Should we send STARTUP IPIs ?
 862	 *
 863	 * Determine this based on the APIC version.
 864	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
 865	 */
 866	if (APIC_INTEGRATED(boot_cpu_apic_version))
 867		num_starts = 2;
 868	else
 869		num_starts = 0;
 870
 871	/*
 872	 * Run STARTUP IPI loop.
 873	 */
 874	pr_debug("#startup loops: %d\n", num_starts);
 875
 876	for (j = 1; j <= num_starts; j++) {
 877		pr_debug("Sending STARTUP #%d\n", j);
 878		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 879			apic_write(APIC_ESR, 0);
 880		apic_read(APIC_ESR);
 881		pr_debug("After apic_write\n");
 882
 883		/*
 884		 * STARTUP IPI
 885		 */
 886
 887		/* Target chip */
 888		/* Boot on the stack */
 889		/* Kick the second */
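		/*
		 * The STARTUP IPI vector field carries bits 19:12 of the
		 * real-mode entry point, so start_eip must be 4K aligned and
		 * below 1MB.
		 */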
 890		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
 891			       phys_apicid);
 892
 893		/*
 894		 * Give the other CPU some time to accept the IPI.
 895		 */
 896		if (init_udelay == 0)
 897			udelay(10);
 898		else
 899			udelay(300);
 900
 901		pr_debug("Startup point 1\n");
 902
 903		pr_debug("Waiting for send to finish...\n");
 904		send_status = safe_apic_wait_icr_idle();
 905
 906		/*
 907		 * Give the other CPU some time to accept the IPI.
 908		 */
 909		if (init_udelay == 0)
 910			udelay(10);
 911		else
 912			udelay(200);
 913
 914		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 915			apic_write(APIC_ESR, 0);
 916		accept_status = (apic_read(APIC_ESR) & 0xEF);
 917		if (send_status || accept_status)
 918			break;
 919	}
 920	pr_debug("After Startup\n");
 921
 922	if (send_status)
 923		pr_err("APIC never delivered???\n");
 924	if (accept_status)
 925		pr_err("APIC delivery error (%lx)\n", accept_status);
 926
 927	preempt_enable();
 928	return (send_status | accept_status);
 929}
 930
 931/* reduce the number of lines printed when booting a large cpu count system */
 932static void announce_cpu(int cpu, int apicid)
 933{
 934	static int width, node_width, first = 1;
 935	static int current_node = NUMA_NO_NODE;
 936	int node = early_cpu_to_node(cpu);
 937
 938	if (!width)
 939		width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */
 940
 941	if (!node_width)
 942		node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */
 943
 944	if (system_state < SYSTEM_RUNNING) {
 945		if (first)
 946			pr_info("x86: Booting SMP configuration:\n");
 947
 948		if (node != current_node) {
 949			if (current_node > (-1))
 950				pr_cont("\n");
 951			current_node = node;
 952
 953			printk(KERN_INFO ".... node %*s#%d, CPUs:  ",
 954			       node_width - num_digits(node), " ", node);
 955		}
 956
 957		/* Add padding for the BSP */
 958		if (first)
 959			pr_cont("%*s", width + 1, " ");
 960		first = 0;
 961
 962		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);
 963	} else
 964		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
 965			node, cpu, apicid);
 966}
 967
 968int common_cpu_up(unsigned int cpu, struct task_struct *idle)
 969{
 970	int ret;
 971
 972	/* Just in case we booted with a single CPU. */
 973	alternatives_enable_smp();
 974
 975	per_cpu(pcpu_hot.current_task, cpu) = idle;
 976	cpu_init_stack_canary(cpu, idle);
 977
 978	/* Initialize the interrupt stack(s) */
 979	ret = irq_init_percpu_irqstack(cpu);
 980	if (ret)
 981		return ret;
 982
 983#ifdef CONFIG_X86_32
 984	/* Stack for startup_32 can be just as for start_secondary onwards */
 985	per_cpu(pcpu_hot.top_of_stack, cpu) = task_top_of_stack(idle);
 986#endif
 987	return 0;
 988}
 989
 990/*
 991 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 992 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 993 * Returns zero if startup was successfully sent, else error code from
 994 * ->wakeup_secondary_cpu.
 995 */
 996static int do_boot_cpu(u32 apicid, int cpu, struct task_struct *idle)
 997{
 998	unsigned long start_ip = real_mode_header->trampoline_start;
 999	int ret;
1000
1001#ifdef CONFIG_X86_64
1002	/* If 64-bit wakeup method exists, use the 64-bit mode trampoline IP */
1003	if (apic->wakeup_secondary_cpu_64)
1004		start_ip = real_mode_header->trampoline_start64;
1005#endif
1006	idle->thread.sp = (unsigned long)task_pt_regs(idle);
1007	initial_code = (unsigned long)start_secondary;
1008
1009	if (IS_ENABLED(CONFIG_X86_32)) {
1010		early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
1011		initial_stack  = idle->thread.sp;
1012	} else if (!(smpboot_control & STARTUP_PARALLEL_MASK)) {
1013		smpboot_control = cpu;
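		/* Without parallel bringup the AP startup code reads its CPU number directly from smpboot_control. */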
1014	}
1015
1016	/* Enable the espfix hack for this CPU */
1017	init_espfix_ap(cpu);
1018
1019	/* So we see what's up */
1020	announce_cpu(cpu, apicid);
1021
1022	/*
1023	 * This grunge runs the startup process for
1024	 * the targeted processor.
1025	 */
1026	if (x86_platform.legacy.warm_reset) {
1027
1028		pr_debug("Setting warm reset code and vector.\n");
1029
1030		smpboot_setup_warm_reset_vector(start_ip);
1031		/*
1032		 * Be paranoid about clearing APIC errors.
 1033		 */
1034		if (APIC_INTEGRATED(boot_cpu_apic_version)) {
1035			apic_write(APIC_ESR, 0);
1036			apic_read(APIC_ESR);
1037		}
1038	}
1039
1040	smp_mb();
1041
1042	/*
 1043	 * Wake up a CPU in different cases:
 1044	 * - Use a method from the APIC driver if one is defined, with wakeup
1045	 *   straight to 64-bit mode preferred over wakeup to RM.
1046	 * Otherwise,
1047	 * - Use an INIT boot APIC message
1048	 */
1049	if (apic->wakeup_secondary_cpu_64)
1050		ret = apic->wakeup_secondary_cpu_64(apicid, start_ip);
1051	else if (apic->wakeup_secondary_cpu)
1052		ret = apic->wakeup_secondary_cpu(apicid, start_ip);
1053	else
1054		ret = wakeup_secondary_cpu_via_init(apicid, start_ip);
1055
1056	/* If the wakeup mechanism failed, cleanup the warm reset vector */
1057	if (ret)
1058		arch_cpuhp_cleanup_kick_cpu(cpu);
1059	return ret;
1060}
1061
1062int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
1063{
1064	u32 apicid = apic->cpu_present_to_apicid(cpu);
1065	int err;
1066
1067	lockdep_assert_irqs_enabled();
1068
1069	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
1070
1071	if (apicid == BAD_APICID || !physid_isset(apicid, phys_cpu_present_map) ||
1072	    !apic_id_valid(apicid)) {
1073		pr_err("%s: bad cpu %d\n", __func__, cpu);
1074		return -EINVAL;
1075	}
1076
1077	/*
1078	 * Save current MTRR state in case it was changed since early boot
1079	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
1080	 */
1081	mtrr_save_state();
1082
1083	/* the FPU context is blank, nobody can own it */
1084	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
1085
1086	err = common_cpu_up(cpu, tidle);
1087	if (err)
1088		return err;
1089
1090	err = do_boot_cpu(apicid, cpu, tidle);
1091	if (err)
1092		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
1093
1094	return err;
1095}
1096
1097int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
1098{
1099	return smp_ops.kick_ap_alive(cpu, tidle);
1100}
1101
1102void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu)
1103{
1104	/* Cleanup possible dangling ends... */
1105	if (smp_ops.kick_ap_alive == native_kick_ap && x86_platform.legacy.warm_reset)
1106		smpboot_restore_warm_reset_vector();
1107}
1108
1109void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
1110{
1111	if (smp_ops.cleanup_dead_cpu)
1112		smp_ops.cleanup_dead_cpu(cpu);
1113
1114	if (system_state == SYSTEM_RUNNING)
1115		pr_info("CPU %u is now offline\n", cpu);
1116}
1117
1118void arch_cpuhp_sync_state_poll(void)
1119{
1120	if (smp_ops.poll_sync_state)
1121		smp_ops.poll_sync_state();
1122}
1123
1124/**
1125 * arch_disable_smp_support() - Disables SMP support for x86 at boottime
1126 */
1127void __init arch_disable_smp_support(void)
1128{
1129	disable_ioapic_support();
1130}
1131
1132/*
1133 * Fall back to non SMP mode after errors.
1134 *
1135 * RED-PEN audit/test this more. I bet there is more state messed up here.
1136 */
1137static __init void disable_smp(void)
1138{
1139	pr_info("SMP disabled\n");
1140
1141	disable_ioapic_support();
1142
1143	init_cpu_present(cpumask_of(0));
1144	init_cpu_possible(cpumask_of(0));
1145
1146	if (smp_found_config)
1147		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
1148	else
1149		physid_set_mask_of_physid(0, &phys_cpu_present_map);
1150	cpumask_set_cpu(0, topology_sibling_cpumask(0));
1151	cpumask_set_cpu(0, topology_core_cpumask(0));
1152	cpumask_set_cpu(0, topology_die_cpumask(0));
1153}
1154
1155static void __init smp_cpu_index_default(void)
1156{
1157	int i;
1158	struct cpuinfo_x86 *c;
1159
1160	for_each_possible_cpu(i) {
1161		c = &cpu_data(i);
1162		/* mark all to hotplug */
1163		c->cpu_index = nr_cpu_ids;
1164	}
1165}
1166
1167void __init smp_prepare_cpus_common(void)
1168{
1169	unsigned int i;
1170
1171	smp_cpu_index_default();
1172
1173	/*
1174	 * Setup boot CPU information
1175	 */
1176	smp_store_boot_cpu_info(); /* Final full version of the data */
1177	mb();
1178
1179	for_each_possible_cpu(i) {
1180		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
1181		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
1182		zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
1183		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
1184		zalloc_cpumask_var(&per_cpu(cpu_l2c_shared_map, i), GFP_KERNEL);
1185	}
1186
1187	set_cpu_sibling_map(0);
1188}
1189
1190#ifdef CONFIG_X86_64
1191/* Establish whether parallel bringup can be supported. */
1192bool __init arch_cpuhp_init_parallel_bringup(void)
1193{
1194	if (!x86_cpuinit.parallel_bringup) {
1195		pr_info("Parallel CPU startup disabled by the platform\n");
1196		return false;
1197	}
1198
1199	smpboot_control = STARTUP_READ_APICID;
1200	pr_debug("Parallel CPU startup enabled: 0x%08x\n", smpboot_control);
1201	return true;
1202}
1203#endif
1204
1205/*
1206 * Prepare for SMP bootup.
 1207 * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
1208 *            for common interface support.
1209 */
1210void __init native_smp_prepare_cpus(unsigned int max_cpus)
1211{
1212	smp_prepare_cpus_common();
1213
1214	switch (apic_intr_mode) {
1215	case APIC_PIC:
1216	case APIC_VIRTUAL_WIRE_NO_CONFIG:
1217		disable_smp();
1218		return;
1219	case APIC_SYMMETRIC_IO_NO_ROUTING:
1220		disable_smp();
1221		/* Setup local timer */
1222		x86_init.timers.setup_percpu_clockev();
1223		return;
1224	case APIC_VIRTUAL_WIRE:
1225	case APIC_SYMMETRIC_IO:
1226		break;
1227	}
1228
1229	/* Setup local timer */
1230	x86_init.timers.setup_percpu_clockev();
1231
1232	pr_info("CPU0: ");
1233	print_cpu_info(&cpu_data(0));
1234
1235	uv_system_init();
1236
1237	smp_quirk_init_udelay();
1238
1239	speculative_store_bypass_ht_init();
1240
1241	snp_set_wakeup_secondary_cpu();
1242}
1243
1244void arch_thaw_secondary_cpus_begin(void)
1245{
1246	set_cache_aps_delayed_init(true);
1247}
1248
1249void arch_thaw_secondary_cpus_end(void)
1250{
1251	cache_aps_init();
1252}
1253
1254/*
1255 * Early setup to make printk work.
1256 */
1257void __init native_smp_prepare_boot_cpu(void)
1258{
1259	int me = smp_processor_id();
1260
1261	/* SMP handles this from setup_per_cpu_areas() */
1262	if (!IS_ENABLED(CONFIG_SMP))
1263		switch_gdt_and_percpu_base(me);
1264
1265	native_pv_lock_init();
1266}
1267
1268void __init calculate_max_logical_packages(void)
1269{
1270	int ncpus;
1271
1272	/*
1273	 * Today neither Intel nor AMD support heterogeneous systems so
1274	 * extrapolate the boot cpu's data to all packages.
1275	 */
1276	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
1277	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
1278	pr_info("Max logical packages: %u\n", __max_logical_packages);
1279}
1280
1281void __init native_smp_cpus_done(unsigned int max_cpus)
1282{
1283	pr_debug("Boot done\n");
1284
1285	calculate_max_logical_packages();
1286	build_sched_topology();
1287	nmi_selftest();
1288	impress_friends();
1289	cache_aps_init();
1290}
1291
1292static int __initdata setup_possible_cpus = -1;
1293static int __init _setup_possible_cpus(char *str)
1294{
1295	get_option(&str, &setup_possible_cpus);
1296	return 0;
1297}
1298early_param("possible_cpus", _setup_possible_cpus);
1299
1300
1301/*
 1302 * cpu_possible_mask should be static: it cannot change as CPUs are
 1303 * onlined or offlined. The reason is that per-cpu data structures are
 1304 * allocated by some modules at init time and cannot be resized
 1305 * dynamically on CPU arrival/departure.
 1306 * cpu_present_mask on the other hand can change dynamically.
 1307 * When CPU hotplug is not compiled in, we fall back to the current
 1308 * behaviour, which is cpu_possible == cpu_present.
1309 * - Ashok Raj
1310 *
1311 * Three ways to find out the number of additional hotplug CPUs:
1312 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
1313 * - The user can overwrite it with possible_cpus=NUM
1314 * - Otherwise don't reserve additional CPUs.
1315 * We do this because additional CPUs waste a lot of memory.
1316 * -AK
1317 */
1318__init void prefill_possible_map(void)
1319{
1320	int i, possible;
1321
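	/* setup_max_cpus reflects the maxcpus= command line; maxcpus=0 brings up only the boot CPU. */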
1322	i = setup_max_cpus ?: 1;
1323	if (setup_possible_cpus == -1) {
1324		possible = num_processors;
1325#ifdef CONFIG_HOTPLUG_CPU
1326		if (setup_max_cpus)
1327			possible += disabled_cpus;
1328#else
1329		if (possible > i)
1330			possible = i;
1331#endif
1332	} else
1333		possible = setup_possible_cpus;
1334
1335	total_cpus = max_t(int, possible, num_processors + disabled_cpus);
1336
1337	/* nr_cpu_ids could be reduced via nr_cpus= */
1338	if (possible > nr_cpu_ids) {
1339		pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
1340			possible, nr_cpu_ids);
1341		possible = nr_cpu_ids;
1342	}
1343
1344#ifdef CONFIG_HOTPLUG_CPU
1345	if (!setup_max_cpus)
1346#endif
1347	if (possible > i) {
1348		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
1349			possible, setup_max_cpus);
1350		possible = i;
1351	}
1352
1353	set_nr_cpu_ids(possible);
1354
1355	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
1356		possible, max_t(int, possible - num_processors, 0));
1357
1358	reset_cpu_possible_mask();
1359
1360	for (i = 0; i < possible; i++)
1361		set_cpu_possible(i, true);
1362}
1363
1364/* correctly size the local cpu masks */
1365void __init setup_cpu_local_masks(void)
1366{
1367	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
1368}
1369
1370#ifdef CONFIG_HOTPLUG_CPU
1371
1372/* Recompute SMT state for all CPUs on offline */
1373static void recompute_smt_state(void)
1374{
1375	int max_threads, cpu;
1376
1377	max_threads = 0;
1378	for_each_online_cpu (cpu) {
1379		int threads = cpumask_weight(topology_sibling_cpumask(cpu));
1380
1381		if (threads > max_threads)
1382			max_threads = threads;
1383	}
1384	__max_smt_threads = max_threads;
1385}
1386
1387static void remove_siblinginfo(int cpu)
1388{
1389	int sibling;
1390	struct cpuinfo_x86 *c = &cpu_data(cpu);
1391
1392	for_each_cpu(sibling, topology_core_cpumask(cpu)) {
1393		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
 1394		/*
1395		 * last thread sibling in this cpu core going down
1396		 */
1397		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
1398			cpu_data(sibling).booted_cores--;
1399	}
1400
1401	for_each_cpu(sibling, topology_die_cpumask(cpu))
1402		cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));
1403
1404	for_each_cpu(sibling, topology_sibling_cpumask(cpu)) {
1405		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
1406		if (cpumask_weight(topology_sibling_cpumask(sibling)) == 1)
1407			cpu_data(sibling).smt_active = false;
1408	}
1409
1410	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
1411		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
1412	for_each_cpu(sibling, cpu_l2c_shared_mask(cpu))
1413		cpumask_clear_cpu(cpu, cpu_l2c_shared_mask(sibling));
1414	cpumask_clear(cpu_llc_shared_mask(cpu));
1415	cpumask_clear(cpu_l2c_shared_mask(cpu));
1416	cpumask_clear(topology_sibling_cpumask(cpu));
1417	cpumask_clear(topology_core_cpumask(cpu));
1418	cpumask_clear(topology_die_cpumask(cpu));
1419	c->topo.core_id = 0;
1420	c->booted_cores = 0;
1421	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
1422	recompute_smt_state();
1423}
1424
1425static void remove_cpu_from_maps(int cpu)
1426{
1427	set_cpu_online(cpu, false);
1428	numa_remove_cpu(cpu);
1429}
1430
1431void cpu_disable_common(void)
1432{
1433	int cpu = smp_processor_id();
1434
1435	remove_siblinginfo(cpu);
1436
1437	/* It's now safe to remove this processor from the online map */
1438	lock_vector_lock();
1439	remove_cpu_from_maps(cpu);
1440	unlock_vector_lock();
1441	fixup_irqs();
1442	lapic_offline();
1443}
1444
1445int native_cpu_disable(void)
1446{
1447	int ret;
1448
1449	ret = lapic_can_unplug_cpu();
1450	if (ret)
1451		return ret;
1452
1453	cpu_disable_common();
1454
1455        /*
1456         * Disable the local APIC. Otherwise IPI broadcasts will reach
1457         * it. It still responds normally to INIT, NMI, SMI, and SIPI
1458         * messages.
1459         *
1460         * Disabling the APIC must happen after cpu_disable_common()
1461         * which invokes fixup_irqs().
1462         *
1463         * Disabling the APIC preserves already set bits in IRR, but
1464         * an interrupt arriving after disabling the local APIC does not
1465         * set the corresponding IRR bit.
1466         *
1467         * fixup_irqs() scans IRR for set bits so it can raise a not
1468         * yet handled interrupt on the new destination CPU via an IPI
1469         * but obviously it can't do so for IRR bits which are not set.
1470         * IOW, interrupts arriving after disabling the local APIC will
1471         * be lost.
1472         */
1473	apic_soft_disable();
1474
1475	return 0;
1476}
1477
1478void play_dead_common(void)
1479{
1480	idle_task_exit();
1481
1482	cpuhp_ap_report_dead();
1483
1484	local_irq_disable();
1485}
1486
1487/*
1488 * We need to flush the caches before going to sleep, lest we have
1489 * dirty data in our caches when we come back up.
1490 */
1491static inline void mwait_play_dead(void)
1492{
1493	struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead);
1494	unsigned int eax, ebx, ecx, edx;
1495	unsigned int highest_cstate = 0;
1496	unsigned int highest_subcstate = 0;
1497	int i;
1498
1499	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1500	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
1501		return;
1502	if (!this_cpu_has(X86_FEATURE_MWAIT))
1503		return;
1504	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
1505		return;
1506	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
1507		return;
1508
1509	eax = CPUID_MWAIT_LEAF;
1510	ecx = 0;
1511	native_cpuid(&eax, &ebx, &ecx, &edx);
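	/* CPUID leaf 5 (MONITOR/MWAIT): EDX holds a 4-bit count of sub-states for each supported C-state. */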
1512
1513	/*
1514	 * eax will be 0 if EDX enumeration is not valid.
1515	 * Initialized below to cstate, sub_cstate value when EDX is valid.
1516	 */
1517	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
1518		eax = 0;
1519	} else {
1520		edx >>= MWAIT_SUBSTATE_SIZE;
1521		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
1522			if (edx & MWAIT_SUBSTATE_MASK) {
1523				highest_cstate = i;
1524				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
1525			}
1526		}
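		/* MWAIT hint: target C-state in bits [7:4], sub C-state in bits [3:0]. */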
1527		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
1528			(highest_subcstate - 1);
1529	}
1530
1531	/* Set up state for the kexec() hack below */
1532	md->status = CPUDEAD_MWAIT_WAIT;
1533	md->control = CPUDEAD_MWAIT_WAIT;
1534
1535	wbinvd();
1536
1537	while (1) {
1538		/*
1539		 * The CLFLUSH is a workaround for erratum AAI65 for
1540		 * the Xeon 7400 series.  It's not clear it is actually
1541		 * needed, but it should be harmless in either case.
1542		 * The WBINVD is insufficient due to the spurious-wakeup
1543		 * case where we return around the loop.
1544		 */
1545		mb();
1546		clflush(md);
1547		mb();
1548		__monitor(md, 0, 0);
1549		mb();
1550		__mwait(eax, 0);
1551
1552		if (READ_ONCE(md->control) == CPUDEAD_MWAIT_KEXEC_HLT) {
1553			/*
1554			 * Kexec is about to happen. Don't go back into mwait() as
1555			 * the kexec kernel might overwrite text and data including
1556			 * page tables and stack. So mwait() would resume when the
1557			 * monitor cache line is written to and then the CPU goes
1558			 * south due to overwritten text, page tables and stack.
1559			 *
1560			 * Note: This does _NOT_ protect against a stray MCE, NMI,
1561			 * SMI. They will resume execution at the instruction
1562			 * following the HLT instruction and run into the problem
1563			 * which this is trying to prevent.
1564			 */
1565			WRITE_ONCE(md->status, CPUDEAD_MWAIT_KEXEC_HLT);
1566			while(1)
1567				native_halt();
1568		}
1569	}
1570}
1571
1572/*
1573 * Kick all "offline" CPUs out of mwait on kexec(). See comment in
1574 * mwait_play_dead().
1575 */
1576void smp_kick_mwait_play_dead(void)
1577{
1578	u32 newstate = CPUDEAD_MWAIT_KEXEC_HLT;
1579	struct mwait_cpu_dead *md;
1580	unsigned int cpu, i;
1581
1582	for_each_cpu_andnot(cpu, cpu_present_mask, cpu_online_mask) {
1583		md = per_cpu_ptr(&mwait_cpu_dead, cpu);
1584
1585		/* Does it sit in mwait_play_dead() ? */
1586		if (READ_ONCE(md->status) != CPUDEAD_MWAIT_WAIT)
1587			continue;
1588
1589		/* Wait up to 5ms */
1590		for (i = 0; READ_ONCE(md->status) != newstate && i < 1000; i++) {
1591			/* Bring it out of mwait */
1592			WRITE_ONCE(md->control, newstate);
1593			udelay(5);
1594		}
1595
1596		if (READ_ONCE(md->status) != newstate)
1597			pr_err_once("CPU%u is stuck in mwait_play_dead()\n", cpu);
1598	}
1599}
1600
1601void __noreturn hlt_play_dead(void)
1602{
1603	if (__this_cpu_read(cpu_info.x86) >= 4)
1604		wbinvd();
1605
1606	while (1)
1607		native_halt();
1608}
1609
1610/*
1611 * native_play_dead() is essentially a __noreturn function, but it can't
1612 * be marked as such as the compiler may complain about it.
1613 */
1614void native_play_dead(void)
1615{
1616	if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
1617		__update_spec_ctrl(0);
1618
1619	play_dead_common();
1620	tboot_shutdown(TB_SHUTDOWN_WFS);
1621
1622	mwait_play_dead();
1623	if (cpuidle_play_dead())
1624		hlt_play_dead();
1625}
1626
1627#else /* ... !CONFIG_HOTPLUG_CPU */
1628int native_cpu_disable(void)
1629{
1630	return -ENOSYS;
1631}
1632
1633void native_play_dead(void)
1634{
1635	BUG();
1636}
1637
1638#endif
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3 *	x86 SMP booting functions
   4 *
   5 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
   6 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
   7 *	Copyright 2001 Andi Kleen, SuSE Labs.
   8 *
   9 *	Much of the core SMP work is based on previous work by Thomas Radke, to
  10 *	whom a great many thanks are extended.
  11 *
  12 *	Thanks to Intel for making available several different Pentium,
  13 *	Pentium Pro and Pentium-II/Xeon MP machines.
  14 *	Original development of Linux SMP code supported by Caldera.
  15 *
  16 *	Fixes
  17 *		Felix Koop	:	NR_CPUS used properly
  18 *		Jose Renau	:	Handle single CPU case.
  19 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
  20 *		Greg Wright	:	Fix for kernel stacks panic.
  21 *		Erich Boleyn	:	MP v1.4 and additional changes.
  22 *	Matthias Sattler	:	Changes for 2.1 kernel map.
  23 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
  24 *	Michael Chastain	:	Change trampoline.S to gnu as.
  25 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
  26 *		Ingo Molnar	:	Added APIC timers, based on code
  27 *					from Jose Renau
  28 *		Ingo Molnar	:	various cleanups and rewrites
  29 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
  30 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
  31 *	Andi Kleen		:	Changed for SMP boot into long mode.
  32 *		Martin J. Bligh	: 	Added support for multi-quad systems
  33 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
  34 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
  35 *      Andi Kleen              :       Converted to new state machine.
  36 *	Ashok Raj		: 	CPU hotplug support
  37 *	Glauber Costa		:	i386 and x86_64 integration
  38 */
  39
  40#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  41
  42#include <linux/init.h>
  43#include <linux/smp.h>
  44#include <linux/export.h>
  45#include <linux/sched.h>
  46#include <linux/sched/topology.h>
  47#include <linux/sched/hotplug.h>
  48#include <linux/sched/task_stack.h>
  49#include <linux/percpu.h>
  50#include <linux/memblock.h>
  51#include <linux/err.h>
  52#include <linux/nmi.h>
  53#include <linux/tboot.h>
  54#include <linux/gfp.h>
  55#include <linux/cpuidle.h>
 
  56#include <linux/numa.h>
  57#include <linux/pgtable.h>
  58#include <linux/overflow.h>
  59#include <linux/syscore_ops.h>
 
 
  60
  61#include <asm/acpi.h>
 
  62#include <asm/desc.h>
  63#include <asm/nmi.h>
  64#include <asm/irq.h>
  65#include <asm/realmode.h>
  66#include <asm/cpu.h>
  67#include <asm/numa.h>
  68#include <asm/tlbflush.h>
  69#include <asm/mtrr.h>
  70#include <asm/mwait.h>
  71#include <asm/apic.h>
  72#include <asm/io_apic.h>
  73#include <asm/fpu/internal.h>
  74#include <asm/setup.h>
  75#include <asm/uv/uv.h>
  76#include <linux/mc146818rtc.h>
  77#include <asm/i8259.h>
  78#include <asm/misc.h>
  79#include <asm/qspinlock.h>
  80#include <asm/intel-family.h>
  81#include <asm/cpu_device_id.h>
  82#include <asm/spec-ctrl.h>
  83#include <asm/hw_irq.h>
  84#include <asm/stackprotector.h>
  85
  86#ifdef CONFIG_ACPI_CPPC_LIB
  87#include <acpi/cppc_acpi.h>
  88#endif
  89
  90/* representing HT siblings of each logical CPU */
  91DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
  92EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  93
  94/* representing HT and core siblings of each logical CPU */
  95DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
  96EXPORT_PER_CPU_SYMBOL(cpu_core_map);
  97
  98/* representing HT, core, and die siblings of each logical CPU */
  99DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
 100EXPORT_PER_CPU_SYMBOL(cpu_die_map);
 101
 102DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 103
 104/* Per CPU bogomips and other parameters */
 105DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 106EXPORT_PER_CPU_SYMBOL(cpu_info);
 107
 108/* Logical package management. We might want to allocate that dynamically */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 109unsigned int __max_logical_packages __read_mostly;
 110EXPORT_SYMBOL(__max_logical_packages);
 111static unsigned int logical_packages __read_mostly;
 112static unsigned int logical_die __read_mostly;
 113
 114/* Maximum number of SMT threads on any online core */
 115int __read_mostly __max_smt_threads = 1;
 116
 117/* Flag to indicate if a complete sched domain rebuild is required */
 118bool x86_topology_update;
 119
 120int arch_update_cpu_topology(void)
 121{
 122	int retval = x86_topology_update;
 123
 124	x86_topology_update = false;
 125	return retval;
 126}
 127
 
 
 128static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 129{
 130	unsigned long flags;
 131
 132	spin_lock_irqsave(&rtc_lock, flags);
 133	CMOS_WRITE(0xa, 0xf);
 
 
 
 
 134	spin_unlock_irqrestore(&rtc_lock, flags);
 135	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
 136							start_eip >> 4;
 137	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
 138							start_eip & 0xf;
 139}
 140
 141static inline void smpboot_restore_warm_reset_vector(void)
 142{
 143	unsigned long flags;
 144
 145	/*
 146	 * Paranoid:  Set warm reset code and vector here back
 147	 * to default values.
 148	 */
 149	spin_lock_irqsave(&rtc_lock, flags);
 150	CMOS_WRITE(0, 0xf);
 
 
 
 151	spin_unlock_irqrestore(&rtc_lock, flags);
 152
 153	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
 154}
 155
 156static void init_freq_invariance(bool secondary, bool cppc_ready);
 157
 158/*
 159 * Report back to the Boot Processor during boot time or to the caller processor
 160 * during CPU online.
 161 */
 162static void smp_callin(void)
 163{
 164	int cpuid;
 165
 166	/*
 167	 * If waken up by an INIT in an 82489DX configuration
 168	 * cpu_callout_mask guarantees we don't get here before
 169	 * an INIT_deassert IPI reaches our local APIC, so it is
 170	 * now safe to touch our local APIC.
 171	 */
 172	cpuid = smp_processor_id();
 173
 174	/*
 175	 * the boot CPU has finished the init stage and is spinning
 176	 * on callin_map until we finish. We are free to set up this
 177	 * CPU, first the APIC. (this is probably redundant on most
 178	 * boards)
 
 
 
 179	 */
 180	apic_ap_setup();
 181
 182	/*
 183	 * Save our processor parameters. Note: this information
 184	 * is needed for clock calibration.
 185	 */
 186	smp_store_cpu_info(cpuid);
 187
 188	/*
 189	 * The topology information must be up to date before
 190	 * calibrate_delay() and notify_cpu_starting().
 191	 */
 192	set_cpu_sibling_map(raw_smp_processor_id());
 193
 194	init_freq_invariance(true, false);
 195
 196	/*
 197	 * Get our bogomips.
 198	 * Update loops_per_jiffy in cpu_data. Previous call to
 199	 * smp_store_cpu_info() stored a value that is close but not as
 200	 * accurate as the value just calculated.
 201	 */
 202	calibrate_delay();
 203	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
 204	pr_debug("Stack at about %p\n", &cpuid);
 205
 206	wmb();
 207
 208	notify_cpu_starting(cpuid);
 209
 210	/*
 211	 * Allow the master to continue.
 212	 */
 213	cpumask_set_cpu(cpuid, cpu_callin_mask);
 214}
 215
 216static int cpu0_logical_apicid;
 217static int enable_start_cpu0;
 218/*
 219 * Activate a secondary processor.
 220 */
 221static void notrace start_secondary(void *unused)
 222{
 223	/*
 224	 * Don't put *anything* except direct CPU state initialization
  225	 * before cpu_init(); SMP booting is fragile enough that we want to
 226	 * limit the things done here to the most necessary things.
 227	 */
 228	cr4_init();
 229
 230#ifdef CONFIG_X86_32
 231	/* switch away from the initial page table */
 232	load_cr3(swapper_pg_dir);
 233	__flush_tlb_all();
 234#endif
 235	cpu_init_secondary();
 236	rcu_cpu_starting(raw_smp_processor_id());
 237	x86_cpuinit.early_percpu_clock_init();
 238	smp_callin();
 239
 240	enable_start_cpu0 = 0;
 241
  242	/* Otherwise gcc will move up smp_processor_id() before cpu_init() */
 243	barrier();
 244	/*
 245	 * Check TSC synchronization with the boot CPU:
 246	 */
 247	check_tsc_sync_target();
 248
 249	speculative_store_bypass_ht_init();
 250
 251	/*
 252	 * Lock vector_lock, set CPU online and bring the vector
 253	 * allocator online. Online must be set with vector_lock held
 254	 * to prevent a concurrent irq setup/teardown from seeing a
 255	 * half valid vector space.
 256	 */
 257	lock_vector_lock();
 258	set_cpu_online(smp_processor_id(), true);
 259	lapic_online();
 260	unlock_vector_lock();
 261	cpu_set_state_online(smp_processor_id());
 262	x86_platform.nmi_init();
 263
 264	/* enable local interrupts */
 265	local_irq_enable();
 266
 267	x86_cpuinit.setup_percpu_clockev();
 268
 269	wmb();
 270	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 271}
 272
 273/**
 274 * topology_is_primary_thread - Check whether CPU is the primary SMT thread
 275 * @cpu:	CPU to check
 276 */
 277bool topology_is_primary_thread(unsigned int cpu)
 278{
 279	return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
 280}
 281
 282/**
 283 * topology_smt_supported - Check whether SMT is supported by the CPUs
 284 */
 285bool topology_smt_supported(void)
 286{
 287	return smp_num_siblings > 1;
 288}
 289
 290/**
 291 * topology_phys_to_logical_pkg - Map a physical package id to a logical
 * @phys_pkg:	The physical package id to map
 292 *
 293 * Returns logical package id or -1 if not found
 294 */
 295int topology_phys_to_logical_pkg(unsigned int phys_pkg)
 296{
 297	int cpu;
 298
 299	for_each_possible_cpu(cpu) {
 300		struct cpuinfo_x86 *c = &cpu_data(cpu);
 301
 302		if (c->initialized && c->phys_proc_id == phys_pkg)
 303			return c->logical_proc_id;
 304	}
 305	return -1;
 306}
 307EXPORT_SYMBOL(topology_phys_to_logical_pkg);
 308/**
  309 * topology_phys_to_logical_die - Map a physical die id to a logical die id
 * @die_id:	The physical die id to map
 * @cur_cpu:	A CPU in the same package as the die
 310 *
 311 * Returns logical die id or -1 if not found
 312 */
 313int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu)
 314{
 315	int cpu;
 316	int proc_id = cpu_data(cur_cpu).phys_proc_id;
 317
 318	for_each_possible_cpu(cpu) {
 319		struct cpuinfo_x86 *c = &cpu_data(cpu);
 320
 321		if (c->initialized && c->cpu_die_id == die_id &&
 322		    c->phys_proc_id == proc_id)
 323			return c->logical_die_id;
 324	}
 325	return -1;
 326}
 327EXPORT_SYMBOL(topology_phys_to_logical_die);
 328
 329/**
 330 * topology_update_package_map - Update the physical to logical package map
 331 * @pkg:	The physical package id as retrieved via CPUID
 332 * @cpu:	The cpu for which this is updated
 333 */
 334int topology_update_package_map(unsigned int pkg, unsigned int cpu)
 335{
 336	int new;
 337
 338	/* Already available somewhere? */
 339	new = topology_phys_to_logical_pkg(pkg);
 340	if (new >= 0)
 341		goto found;
 342
 343	new = logical_packages++;
 344	if (new != pkg) {
 345		pr_info("CPU %u Converting physical %u to logical package %u\n",
 346			cpu, pkg, new);
 347	}
 348found:
 349	cpu_data(cpu).logical_proc_id = new;
 350	return 0;
 351}
 352/**
 353 * topology_update_die_map - Update the physical to logical die map
 354 * @die:	The die id as retrieved via CPUID
 355 * @cpu:	The cpu for which this is updated
 356 */
 357int topology_update_die_map(unsigned int die, unsigned int cpu)
 358{
 359	int new;
 360
 361	/* Already available somewhere? */
 362	new = topology_phys_to_logical_die(die, cpu);
 363	if (new >= 0)
 364		goto found;
 365
 366	new = logical_die++;
 367	if (new != die) {
 368		pr_info("CPU %u Converting physical %u to logical die %u\n",
 369			cpu, die, new);
 370	}
 371found:
 372	cpu_data(cpu).logical_die_id = new;
 373	return 0;
 374}
 375
 376void __init smp_store_boot_cpu_info(void)
 377{
 378	int id = 0; /* CPU 0 */
 379	struct cpuinfo_x86 *c = &cpu_data(id);
 380
 381	*c = boot_cpu_data;
 382	c->cpu_index = id;
 383	topology_update_package_map(c->phys_proc_id, id);
 384	topology_update_die_map(c->cpu_die_id, id);
 385	c->initialized = true;
 386}
 387
 388/*
 389 * The bootstrap kernel entry code has set these up. Save them for
 390 * a given CPU
 391 */
 392void smp_store_cpu_info(int id)
 393{
 394	struct cpuinfo_x86 *c = &cpu_data(id);
 395
 396	/* Copy boot_cpu_data only on the first bringup */
 397	if (!c->initialized)
 398		*c = boot_cpu_data;
 399	c->cpu_index = id;
 400	/*
 401	 * During boot time, CPU0 has this setup already. Save the info when
 402	 * bringing up AP or offlined CPU0.
 403	 */
 404	identify_secondary_cpu(c);
 405	c->initialized = true;
 406}
 407
 408static bool
 409topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 410{
 411	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 412
 413	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
 414}
 415
 416static bool
 417topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
 418{
 419	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 420
 421	return !WARN_ONCE(!topology_same_node(c, o),
 422		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
 423		"[node: %d != %d]. Ignoring dependency.\n",
 424		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
 425}
 426
 427#define link_mask(mfunc, c1, c2)					\
 428do {									\
 429	cpumask_set_cpu((c1), mfunc(c2));				\
 430	cpumask_set_cpu((c2), mfunc(c1));				\
 431} while (0)
 432
 433static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 434{
 435	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 436		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 437
 438		if (c->phys_proc_id == o->phys_proc_id &&
 439		    c->cpu_die_id == o->cpu_die_id &&
 440		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
 441			if (c->cpu_core_id == o->cpu_core_id)
 442				return topology_sane(c, o, "smt");
 443
 444			if ((c->cu_id != 0xff) &&
 445			    (o->cu_id != 0xff) &&
 446			    (c->cu_id == o->cu_id))
 447				return topology_sane(c, o, "smt");
 448		}
 449
 450	} else if (c->phys_proc_id == o->phys_proc_id &&
 451		   c->cpu_die_id == o->cpu_die_id &&
 452		   c->cpu_core_id == o->cpu_core_id) {
 453		return topology_sane(c, o, "smt");
 454	}
 455
 456	return false;
 457}
 458
 459static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 460{
 461	if (c->phys_proc_id == o->phys_proc_id &&
 462	    c->cpu_die_id == o->cpu_die_id)
 463		return true;
 464	return false;
 465}
 466
 467/*
 468 * Unlike the other levels, we do not enforce keeping a
 469 * multicore group inside a NUMA node.  If this happens, we will
 470 * discard the MC level of the topology later.
 471 */
 472static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 473{
 474	if (c->phys_proc_id == o->phys_proc_id)
 475		return true;
 476	return false;
 477}
 478
 479/*
 480 * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
 481 *
 482 * Any Intel CPU that has multiple nodes per package and does not
 483 * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
 484 *
 485 * When in SNC mode, these CPUs enumerate an LLC that is shared
 486 * by multiple NUMA nodes. The LLC is shared for off-package data
 487 * access but private to the NUMA node (half of the package) for
 488 * on-package access. CPUID (the source of the information about
 489 * the LLC) can only enumerate the cache as shared or unshared,
 490 * but not this particular configuration.
 491 */
 492
 493static const struct x86_cpu_id intel_cod_cpu[] = {
 494	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0),	/* COD */
 495	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0),	/* COD */
 496	X86_MATCH_INTEL_FAM6_MODEL(ANY, 1),		/* SNC */
 497	{}
 498};
 499
 500static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 501{
 502	const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
 503	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 504	bool intel_snc = id && id->driver_data;
 505
 506	/* Do not match if we do not have a valid APICID for cpu: */
 507	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
 508		return false;
 509
 510	/* Do not match if LLC id does not match: */
 511	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
 512		return false;
 513
 514	/*
 515	 * Allow the SNC topology without warning. Return of false
 516	 * means 'c' does not share the LLC of 'o'. This will be
 517	 * reflected to userspace.
 518	 */
 519	if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
 520		return false;
 521
 522	return topology_sane(c, o, "llc");
 523}
 524
 525
 526#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
 527static inline int x86_sched_itmt_flags(void)
 528{
 529	return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
 530}
 531
 532#ifdef CONFIG_SCHED_MC
 533static int x86_core_flags(void)
 534{
 535	return cpu_core_flags() | x86_sched_itmt_flags();
 536}
 537#endif
 538#ifdef CONFIG_SCHED_SMT
 539static int x86_smt_flags(void)
 540{
 541	return cpu_smt_flags() | x86_sched_itmt_flags();
 542}
 543#endif
 544#endif
 545
 546static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
 547#ifdef CONFIG_SCHED_SMT
 548	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
 549#endif
 550#ifdef CONFIG_SCHED_MC
 551	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
 552#endif
 553	{ NULL, },
 554};
 555
 556static struct sched_domain_topology_level x86_topology[] = {
 557#ifdef CONFIG_SCHED_SMT
 558	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
 559#endif
 560#ifdef CONFIG_SCHED_MC
 561	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
 562#endif
 563	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 564	{ NULL, },
 565};
 566
 567/*
 568 * Set if a package/die has multiple NUMA nodes inside.
 569 * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
 570 * Sub-NUMA Clustering have this.
 571 */
 572static bool x86_has_numa_in_package;
 573
 574void set_cpu_sibling_map(int cpu)
 575{
 576	bool has_smt = smp_num_siblings > 1;
 577	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
 578	struct cpuinfo_x86 *c = &cpu_data(cpu);
 579	struct cpuinfo_x86 *o;
 580	int i, threads;
 581
 582	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 583
 584	if (!has_mp) {
 585		cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
 586		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 587		cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
 588		cpumask_set_cpu(cpu, topology_die_cpumask(cpu));
 589		c->booted_cores = 1;
 590		return;
 591	}
 592
 593	for_each_cpu(i, cpu_sibling_setup_mask) {
 594		o = &cpu_data(i);
 595
 596		if (match_pkg(c, o) && !topology_same_node(c, o))
 597			x86_has_numa_in_package = true;
 598
 599		if ((i == cpu) || (has_smt && match_smt(c, o)))
 600			link_mask(topology_sibling_cpumask, cpu, i);
 601
 602		if ((i == cpu) || (has_mp && match_llc(c, o)))
 603			link_mask(cpu_llc_shared_mask, cpu, i);
 604
 605		if ((i == cpu) || (has_mp && match_die(c, o)))
 606			link_mask(topology_die_cpumask, cpu, i);
 607	}
 608
 609	threads = cpumask_weight(topology_sibling_cpumask(cpu));
 610	if (threads > __max_smt_threads)
 611		__max_smt_threads = threads;
 612
 613	/*
 614	 * This needs a separate iteration over the cpus because we rely on all
  615	 * topology_sibling_cpumask links to be set up.
 616	 */
 617	for_each_cpu(i, cpu_sibling_setup_mask) {
 618		o = &cpu_data(i);
 619
 620		if ((i == cpu) || (has_mp && match_pkg(c, o))) {
 621			link_mask(topology_core_cpumask, cpu, i);
 622
 623			/*
  624			 *  Does this new cpu bring up a new core?
 625			 */
 626			if (threads == 1) {
 627				/*
 628				 * for each core in package, increment
 629				 * the booted_cores for this new cpu
 630				 */
 631				if (cpumask_first(
 632				    topology_sibling_cpumask(i)) == i)
 633					c->booted_cores++;
 634				/*
 635				 * increment the core count for all
 636				 * the other cpus in this package
 637				 */
 638				if (i != cpu)
 639					cpu_data(i).booted_cores++;
 640			} else if (i != cpu && !c->booted_cores)
 641				c->booted_cores = cpu_data(i).booted_cores;
 642		}
 643	}
 644}
 645
 646/* maps the cpu to the sched domain representing multi-core */
 647const struct cpumask *cpu_coregroup_mask(int cpu)
 648{
 649	return cpu_llc_shared_mask(cpu);
 650}
 651
 652static void impress_friends(void)
 653{
 654	int cpu;
 655	unsigned long bogosum = 0;
 656	/*
 657	 * Allow the user to impress friends.
 658	 */
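	/*
	 * One BogoMIPS is 500000 delay-loop iterations per second, so with
	 * bogosum being the sum of loops_per_jiffy over the called-out CPUs,
	 * bogosum/(500000/HZ) below is the integer part of the total and the
	 * second term its two decimal places.
	 */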
 659	pr_debug("Before bogomips\n");
 660	for_each_possible_cpu(cpu)
 661		if (cpumask_test_cpu(cpu, cpu_callout_mask))
 662			bogosum += cpu_data(cpu).loops_per_jiffy;
 663	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
 664		num_online_cpus(),
 665		bogosum/(500000/HZ),
 666		(bogosum/(5000/HZ))%100);
 667
 668	pr_debug("Before bogocount - setting activated=1\n");
 669}
 670
 671void __inquire_remote_apic(int apicid)
 672{
 673	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
 674	const char * const names[] = { "ID", "VERSION", "SPIV" };
 675	int timeout;
 676	u32 status;
 677
 678	pr_info("Inquiring remote APIC 0x%x...\n", apicid);
 679
 680	for (i = 0; i < ARRAY_SIZE(regs); i++) {
 681		pr_info("... APIC 0x%x %s: ", apicid, names[i]);
 682
 683		/*
 684		 * Wait for idle.
 685		 */
 686		status = safe_apic_wait_icr_idle();
 687		if (status)
 688			pr_cont("a previous APIC delivery may have failed\n");
 689
 690		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
 691
 692		timeout = 0;
 693		do {
 694			udelay(100);
 695			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
 696		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
 697
 698		switch (status) {
 699		case APIC_ICR_RR_VALID:
 700			status = apic_read(APIC_RRR);
 701			pr_cont("%08x\n", status);
 702			break;
 703		default:
 704			pr_cont("failed\n");
 705		}
 706	}
 707}
 708
 709/*
 710 * The Multiprocessor Specification 1.4 (1997) example code suggests
 711 * that there should be a 10ms delay between the BSP asserting INIT
 712 * and de-asserting INIT, when starting a remote processor.
 713 * But that slows boot and resume on modern processors, which include
 714 * many cores and don't require that delay.
 715 *
  716 * Cmdline "cpu_init_udelay=" is available to override this delay.
 717 * Modern processor families are quirked to remove the delay entirely.
 718 */
 719#define UDELAY_10MS_DEFAULT 10000
 720
 721static unsigned int init_udelay = UINT_MAX;
 722
 723static int __init cpu_init_udelay(char *str)
 724{
 725	get_option(&str, &init_udelay);
 726
 727	return 0;
 728}
 729early_param("cpu_init_udelay", cpu_init_udelay);
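/*
 * Example: booting with "cpu_init_udelay=10000" restores the legacy 10ms
 * INIT delay even on processors that smp_quirk_init_udelay() would
 * otherwise quirk down to zero.
 */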
 730
 731static void __init smp_quirk_init_udelay(void)
 732{
 733	/* if cmdline changed it from default, leave it alone */
 734	if (init_udelay != UINT_MAX)
 735		return;
 736
 737	/* if modern processor, use no delay */
 738	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
 739	    ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
 740	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
 741		init_udelay = 0;
 742		return;
 743	}
 744	/* else, use legacy delay */
 745	init_udelay = UDELAY_10MS_DEFAULT;
 746}
 747
 748/*
 749 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 750 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 751 * won't ... remember to clear down the APIC, etc later.
 752 */
 753int
 754wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
 755{
 756	u32 dm = apic->dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
 757	unsigned long send_status, accept_status = 0;
 758	int maxlvt;
 759
 760	/* Target chip */
 761	/* Boot on the stack */
 762	/* Kick the second */
 763	apic_icr_write(APIC_DM_NMI | dm, apicid);
 764
 765	pr_debug("Waiting for send to finish...\n");
 766	send_status = safe_apic_wait_icr_idle();
 767
 768	/*
 769	 * Give the other CPU some time to accept the IPI.
 770	 */
 771	udelay(200);
 772	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
 773		maxlvt = lapic_get_maxlvt();
 774		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
 775			apic_write(APIC_ESR, 0);
 776		accept_status = (apic_read(APIC_ESR) & 0xEF);
 777	}
 778	pr_debug("NMI sent\n");
 779
 780	if (send_status)
 781		pr_err("APIC never delivered???\n");
 782	if (accept_status)
 783		pr_err("APIC delivery error (%lx)\n", accept_status);
 784
 785	return (send_status | accept_status);
 786}
 787
 788static int
 789wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 790{
 791	unsigned long send_status = 0, accept_status = 0;
 792	int maxlvt, num_starts, j;
 793
 794	maxlvt = lapic_get_maxlvt();
 795
 796	/*
 797	 * Be paranoid about clearing APIC errors.
 798	 */
 799	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
 800		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 801			apic_write(APIC_ESR, 0);
 802		apic_read(APIC_ESR);
 803	}
 804
 805	pr_debug("Asserting INIT\n");
 806
 807	/*
 808	 * Turn INIT on target chip
 809	 */
 810	/*
 811	 * Send IPI
 812	 */
 813	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
 814		       phys_apicid);
 815
 816	pr_debug("Waiting for send to finish...\n");
 817	send_status = safe_apic_wait_icr_idle();
 818
 819	udelay(init_udelay);
 820
 821	pr_debug("Deasserting INIT\n");
 822
 823	/* Target chip */
 824	/* Send IPI */
 825	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
 826
 827	pr_debug("Waiting for send to finish...\n");
 828	send_status = safe_apic_wait_icr_idle();
 829
 830	mb();
 831
 832	/*
 833	 * Should we send STARTUP IPIs ?
 834	 *
 835	 * Determine this based on the APIC version.
 836	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
 837	 */
 838	if (APIC_INTEGRATED(boot_cpu_apic_version))
 839		num_starts = 2;
 840	else
 841		num_starts = 0;
 842
 843	/*
 844	 * Run STARTUP IPI loop.
 845	 */
 846	pr_debug("#startup loops: %d\n", num_starts);
 847
 848	for (j = 1; j <= num_starts; j++) {
 849		pr_debug("Sending STARTUP #%d\n", j);
 850		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 851			apic_write(APIC_ESR, 0);
 852		apic_read(APIC_ESR);
 853		pr_debug("After apic_write\n");
 854
 855		/*
 856		 * STARTUP IPI
 857		 */
 858
 859		/* Target chip */
 860		/* Boot on the stack */
 861		/* Kick the second */
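		/*
		 * The vector field of the STARTUP IPI carries the 4K page
		 * number the AP starts executing at, hence start_eip >> 12
		 * and the requirement that the trampoline be page-aligned
		 * and below 1MB.
		 */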
 862		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
 863			       phys_apicid);
 864
 865		/*
 866		 * Give the other CPU some time to accept the IPI.
 867		 */
 868		if (init_udelay == 0)
 869			udelay(10);
 870		else
 871			udelay(300);
 872
 873		pr_debug("Startup point 1\n");
 874
 875		pr_debug("Waiting for send to finish...\n");
 876		send_status = safe_apic_wait_icr_idle();
 877
 878		/*
 879		 * Give the other CPU some time to accept the IPI.
 880		 */
 881		if (init_udelay == 0)
 882			udelay(10);
 883		else
 884			udelay(200);
 885
 886		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 887			apic_write(APIC_ESR, 0);
 888		accept_status = (apic_read(APIC_ESR) & 0xEF);
 889		if (send_status || accept_status)
 890			break;
 891	}
 892	pr_debug("After Startup\n");
 893
 894	if (send_status)
 895		pr_err("APIC never delivered???\n");
 896	if (accept_status)
 897		pr_err("APIC delivery error (%lx)\n", accept_status);
 898
 899	return (send_status | accept_status);
 900}
 901
 902/* reduce the number of lines printed when booting a large cpu count system */
 903static void announce_cpu(int cpu, int apicid)
 904{
 905	static int current_node = NUMA_NO_NODE;
 906	int node = early_cpu_to_node(cpu);
 907	static int width, node_width;
 908
 909	if (!width)
 910		width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */
 911
 912	if (!node_width)
 913		node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */
 914
 915	if (cpu == 1)
 916		printk(KERN_INFO "x86: Booting SMP configuration:\n");
 917
 918	if (system_state < SYSTEM_RUNNING) {
 919		if (node != current_node) {
 920			if (current_node > (-1))
 921				pr_cont("\n");
 922			current_node = node;
 923
 924			printk(KERN_INFO ".... node %*s#%d, CPUs:  ",
 925			       node_width - num_digits(node), " ", node);
 926		}
 927
 928		/* Add padding for the BSP */
 929		if (cpu == 1)
 930			pr_cont("%*s", width + 1, " ");
 931
 932		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);
 933
 934	} else
 935		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
 936			node, cpu, apicid);
 937}
 938
 939static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
 940{
 941	int cpu;
 942
 943	cpu = smp_processor_id();
 944	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
 945		return NMI_HANDLED;
 946
 947	return NMI_DONE;
 948}
 949
 950/*
 951 * Wake up AP by INIT, INIT, STARTUP sequence.
 952 *
 953 * Instead of waiting for STARTUP after INITs, BSP will execute the BIOS
  954 * boot-strap code, which is not the desired behavior when waking up the BSP.
  955 * To avoid the boot-strap code, wake up CPU0 by NMI instead.
 956 *
 957 * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined
 958 * (i.e. physically hot removed and then hot added), NMI won't wake it up.
 959 * We'll change this code in the future to wake up hard offlined CPU0 if
 960 * real platform and request are available.
 961 */
 962static int
 963wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
 964	       int *cpu0_nmi_registered)
 965{
 966	int id;
 967	int boot_error;
 968
 969	preempt_disable();
 970
 971	/*
 972	 * Wake up AP by INIT, INIT, STARTUP sequence.
 973	 */
 974	if (cpu) {
 975		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
 976		goto out;
 977	}
 978
 979	/*
 980	 * Wake up BSP by nmi.
 981	 *
 982	 * Register a NMI handler to help wake up CPU0.
 983	 */
 984	boot_error = register_nmi_handler(NMI_LOCAL,
 985					  wakeup_cpu0_nmi, 0, "wake_cpu0");
 986
 987	if (!boot_error) {
 988		enable_start_cpu0 = 1;
 989		*cpu0_nmi_registered = 1;
 990		id = apic->dest_mode_logical ? cpu0_logical_apicid : apicid;
 991		boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
 992	}
 993
 994out:
 995	preempt_enable();
 996
 997	return boot_error;
 998}
 999
1000int common_cpu_up(unsigned int cpu, struct task_struct *idle)
1001{
1002	int ret;
1003
1004	/* Just in case we booted with a single CPU. */
1005	alternatives_enable_smp();
1006
1007	per_cpu(current_task, cpu) = idle;
1008	cpu_init_stack_canary(cpu, idle);
1009
1010	/* Initialize the interrupt stack(s) */
1011	ret = irq_init_percpu_irqstack(cpu);
1012	if (ret)
1013		return ret;
1014
1015#ifdef CONFIG_X86_32
1016	/* Stack for startup_32 can be just as for start_secondary onwards */
1017	per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
1018#else
1019	initial_gs = per_cpu_offset(cpu);
1020#endif
1021	return 0;
1022}
1023
1024/*
1025 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
1026 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
1027 * Returns zero if CPU booted OK, else error code from
1028 * ->wakeup_secondary_cpu.
1029 */
1030static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
1031		       int *cpu0_nmi_registered)
1032{
1033	/* start_ip had better be page-aligned! */
1034	unsigned long start_ip = real_mode_header->trampoline_start;
1035
1036	unsigned long boot_error = 0;
1037	unsigned long timeout;
1038
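	/*
	 * These globals are consumed by the real-mode trampoline and the
	 * early startup assembly before the AP has its own per-CPU state:
	 * they tell it which stack, GDT and C entry point to use.
	 */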
1039	idle->thread.sp = (unsigned long)task_pt_regs(idle);
1040	early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
1041	initial_code = (unsigned long)start_secondary;
1042	initial_stack  = idle->thread.sp;
1043
1044	/* Enable the espfix hack for this CPU */
1045	init_espfix_ap(cpu);
1046
1047	/* So we see what's up */
1048	announce_cpu(cpu, apicid);
1049
1050	/*
1051	 * This grunge runs the startup process for
1052	 * the targeted processor.
1053	 */
1054
1055	if (x86_platform.legacy.warm_reset) {
1056
1057		pr_debug("Setting warm reset code and vector.\n");
1058
1059		smpboot_setup_warm_reset_vector(start_ip);
1060		/*
1061		 * Be paranoid about clearing APIC errors.
 1062		 */
1063		if (APIC_INTEGRATED(boot_cpu_apic_version)) {
1064			apic_write(APIC_ESR, 0);
1065			apic_read(APIC_ESR);
1066		}
1067	}
1068
1069	/*
1070	 * AP might wait on cpu_callout_mask in cpu_init() with
1071	 * cpu_initialized_mask set if previous attempt to online
 1072	 * it timed out. Clear cpu_initialized_mask so that after
 1073	 * INIT/SIPI it can start with a clean state.
1074	 */
1075	cpumask_clear_cpu(cpu, cpu_initialized_mask);
1076	smp_mb();
1077
1078	/*
 1079	 * Wake up a CPU in different cases:
1080	 * - Use the method in the APIC driver if it's defined
1081	 * Otherwise,
1082	 * - Use an INIT boot APIC message for APs or NMI for BSP.
1083	 */
1084	if (apic->wakeup_secondary_cpu)
1085		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
1086	else
1087		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
1088						     cpu0_nmi_registered);
1089
1090	if (!boot_error) {
1091		/*
1092		 * Wait 10s total for first sign of life from AP
1093		 */
1094		boot_error = -1;
1095		timeout = jiffies + 10*HZ;
1096		while (time_before(jiffies, timeout)) {
1097			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
1098				/*
1099				 * Tell AP to proceed with initialization
1100				 */
1101				cpumask_set_cpu(cpu, cpu_callout_mask);
1102				boot_error = 0;
1103				break;
1104			}
1105			schedule();
1106		}
1107	}
1108
1109	if (!boot_error) {
1110		/*
1111		 * Wait till AP completes initial initialization
1112		 */
1113		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
1114			/*
1115			 * Allow other tasks to run while we wait for the
1116			 * AP to come online. This also gives a chance
 1117			 * for the MTRR work (triggered by the AP coming online)
1118			 * to be completed in the stop machine context.
1119			 */
1120			schedule();
1121		}
1122	}
1123
1124	if (x86_platform.legacy.warm_reset) {
1125		/*
1126		 * Cleanup possible dangling ends...
1127		 */
1128		smpboot_restore_warm_reset_vector();
1129	}
1130
1131	return boot_error;
1132}
1133
1134int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
1135{
1136	int apicid = apic->cpu_present_to_apicid(cpu);
1137	int cpu0_nmi_registered = 0;
1138	unsigned long flags;
1139	int err, ret = 0;
1140
1141	lockdep_assert_irqs_enabled();
1142
1143	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
1144
1145	if (apicid == BAD_APICID ||
1146	    !physid_isset(apicid, phys_cpu_present_map) ||
1147	    !apic->apic_id_valid(apicid)) {
1148		pr_err("%s: bad cpu %d\n", __func__, cpu);
1149		return -EINVAL;
1150	}
1151
1152	/*
1153	 * Already booted CPU?
1154	 */
1155	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
1156		pr_debug("do_boot_cpu %d Already started\n", cpu);
1157		return -ENOSYS;
1158	}
1159
1160	/*
1161	 * Save current MTRR state in case it was changed since early boot
1162	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
1163	 */
1164	mtrr_save_state();
1165
1166	/* x86 CPUs take themselves offline, so delayed offline is OK. */
1167	err = cpu_check_up_prepare(cpu);
1168	if (err && err != -EBUSY)
1169		return err;
1170
1171	/* the FPU context is blank, nobody can own it */
1172	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
1173
1174	err = common_cpu_up(cpu, tidle);
1175	if (err)
1176		return err;
1177
1178	err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
1179	if (err) {
1180		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
1181		ret = -EIO;
1182		goto unreg_nmi;
1183	}
1184
1185	/*
1186	 * Check TSC synchronization with the AP (keep irqs disabled
1187	 * while doing so):
1188	 */
1189	local_irq_save(flags);
1190	check_tsc_sync_source(cpu);
1191	local_irq_restore(flags);
1192
1193	while (!cpu_online(cpu)) {
1194		cpu_relax();
1195		touch_nmi_watchdog();
1196	}
1197
1198unreg_nmi:
1199	/*
1200	 * Clean up the nmi handler. Do this after the callin and callout sync
1201	 * to avoid impact of possible long unregister time.
1202	 */
1203	if (cpu0_nmi_registered)
1204		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");
1205
1206	return ret;
1207}
1208
1209/**
1210 * arch_disable_smp_support() - disables SMP support for x86 at runtime
1211 */
1212void arch_disable_smp_support(void)
1213{
1214	disable_ioapic_support();
1215}
1216
1217/*
1218 * Fall back to non SMP mode after errors.
1219 *
1220 * RED-PEN audit/test this more. I bet there is more state messed up here.
1221 */
1222static __init void disable_smp(void)
1223{
1224	pr_info("SMP disabled\n");
1225
1226	disable_ioapic_support();
1227
1228	init_cpu_present(cpumask_of(0));
1229	init_cpu_possible(cpumask_of(0));
1230
1231	if (smp_found_config)
1232		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
1233	else
1234		physid_set_mask_of_physid(0, &phys_cpu_present_map);
1235	cpumask_set_cpu(0, topology_sibling_cpumask(0));
1236	cpumask_set_cpu(0, topology_core_cpumask(0));
1237	cpumask_set_cpu(0, topology_die_cpumask(0));
1238}
1239
1240/*
1241 * Various sanity checks.
1242 */
1243static void __init smp_sanity_check(void)
1244{
1245	preempt_disable();
1246
1247#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
1248	if (def_to_bigsmp && nr_cpu_ids > 8) {
1249		unsigned int cpu;
1250		unsigned nr;
1251
1252		pr_warn("More than 8 CPUs detected - skipping them\n"
1253			"Use CONFIG_X86_BIGSMP\n");
1254
1255		nr = 0;
1256		for_each_present_cpu(cpu) {
1257			if (nr >= 8)
1258				set_cpu_present(cpu, false);
1259			nr++;
1260		}
1261
1262		nr = 0;
1263		for_each_possible_cpu(cpu) {
1264			if (nr >= 8)
1265				set_cpu_possible(cpu, false);
1266			nr++;
1267		}
1268
1269		nr_cpu_ids = 8;
1270	}
1271#endif
1272
1273	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
1274		pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
1275			hard_smp_processor_id());
1276
1277		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1278	}
1279
1280	/*
1281	 * Should not be necessary because the MP table should list the boot
1282	 * CPU too, but we do it for the sake of robustness anyway.
1283	 */
1284	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
1285		pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
1286			  boot_cpu_physical_apicid);
1287		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1288	}
1289	preempt_enable();
1290}
1291
1292static void __init smp_cpu_index_default(void)
1293{
1294	int i;
1295	struct cpuinfo_x86 *c;
1296
1297	for_each_possible_cpu(i) {
1298		c = &cpu_data(i);
1299		/* mark all to hotplug */
1300		c->cpu_index = nr_cpu_ids;
1301	}
1302}
1303
1304static void __init smp_get_logical_apicid(void)
1305{
1306	if (x2apic_mode)
1307		cpu0_logical_apicid = apic_read(APIC_LDR);
1308	else
1309		cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
1310}
1311
1312/*
1313 * Prepare for SMP bootup.
 1314 * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
1315 *            for common interface support.
1316 */
1317void __init native_smp_prepare_cpus(unsigned int max_cpus)
1318{
1319	unsigned int i;
1320
1321	smp_cpu_index_default();
1322
1323	/*
1324	 * Setup boot CPU information
1325	 */
1326	smp_store_boot_cpu_info(); /* Final full version of the data */
1327	cpumask_copy(cpu_callin_mask, cpumask_of(0));
1328	mb();
1329
1330	for_each_possible_cpu(i) {
1331		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
1332		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
1333		zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
1334		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
1335	}
1336
1337	/*
1338	 * Set 'default' x86 topology, this matches default_topology() in that
1339	 * it has NUMA nodes as a topology level. See also
1340	 * native_smp_cpus_done().
1341	 *
 1342 * Must be done before set_cpu_sibling_map() is run.
1343	 */
1344	set_sched_topology(x86_topology);
1345
1346	set_cpu_sibling_map(0);
1347	init_freq_invariance(false, false);
1348	smp_sanity_check();
1349
1350	switch (apic_intr_mode) {
1351	case APIC_PIC:
1352	case APIC_VIRTUAL_WIRE_NO_CONFIG:
1353		disable_smp();
1354		return;
1355	case APIC_SYMMETRIC_IO_NO_ROUTING:
1356		disable_smp();
1357		/* Setup local timer */
1358		x86_init.timers.setup_percpu_clockev();
1359		return;
1360	case APIC_VIRTUAL_WIRE:
1361	case APIC_SYMMETRIC_IO:
1362		break;
1363	}
1364
1365	/* Setup local timer */
1366	x86_init.timers.setup_percpu_clockev();
1367
1368	smp_get_logical_apicid();
1369
1370	pr_info("CPU0: ");
1371	print_cpu_info(&cpu_data(0));
1372
1373	uv_system_init();
1374
1375	set_mtrr_aps_delayed_init();
1376
1377	smp_quirk_init_udelay();
1378
1379	speculative_store_bypass_ht_init();
1380}
1381
1382void arch_thaw_secondary_cpus_begin(void)
1383{
1384	set_mtrr_aps_delayed_init();
1385}
1386
1387void arch_thaw_secondary_cpus_end(void)
1388{
1389	mtrr_aps_init();
1390}
1391
1392/*
1393 * Early setup to make printk work.
1394 */
1395void __init native_smp_prepare_boot_cpu(void)
1396{
1397	int me = smp_processor_id();
1398	switch_to_new_gdt(me);
1399	/* already set me in cpu_online_mask in boot_cpu_init() */
1400	cpumask_set_cpu(me, cpu_callout_mask);
1401	cpu_set_state_online(me);
1402	native_pv_lock_init();
1403}
1404
1405void __init calculate_max_logical_packages(void)
1406{
1407	int ncpus;
1408
1409	/*
 1410	 * Today neither Intel nor AMD supports heterogeneous systems, so
1411	 * extrapolate the boot cpu's data to all packages.
1412	 */
1413	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
1414	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
1415	pr_info("Max logical packages: %u\n", __max_logical_packages);
1416}
1417
1418void __init native_smp_cpus_done(unsigned int max_cpus)
1419{
1420	pr_debug("Boot done\n");
1421
1422	calculate_max_logical_packages();
1423
1424	if (x86_has_numa_in_package)
1425		set_sched_topology(x86_numa_in_package_topology);
1426
1427	nmi_selftest();
1428	impress_friends();
1429	mtrr_aps_init();
1430}
1431
1432static int __initdata setup_possible_cpus = -1;
1433static int __init _setup_possible_cpus(char *str)
1434{
1435	get_option(&str, &setup_possible_cpus);
1436	return 0;
1437}
1438early_param("possible_cpus", _setup_possible_cpus);
1439
1440
1441/*
 1442 * cpu_possible_mask should be static: it cannot change as CPUs are
 1443 * onlined or offlined. The reason is that per-cpu data structures
 1444 * are allocated by some modules at init time, and they don't expect
 1445 * to do this dynamically on CPU arrival/departure.
 1446 * cpu_present_mask, on the other hand, can change dynamically.
 1447 * When CPU hotplug is not compiled in, we fall back to the current
 1448 * behaviour, which is cpu_possible == cpu_present.
1449 * - Ashok Raj
1450 *
1451 * Three ways to find out the number of additional hotplug CPUs:
1452 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
1453 * - The user can overwrite it with possible_cpus=NUM
1454 * - Otherwise don't reserve additional CPUs.
1455 * We do this because additional CPUs waste a lot of memory.
1456 * -AK
1457 */
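/*
 * Worked example: with CPU hotplug enabled and no possible_cpus=/nr_cpus=
 * override, a machine whose firmware lists 16 enabled and 16 disabled CPUs
 * ends up with possible = 32, i.e. 16 hotpluggable CPUs on top of the 16
 * present ones.
 */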
1458__init void prefill_possible_map(void)
1459{
1460	int i, possible;
1461
1462	/* No boot processor was found in mptable or ACPI MADT */
1463	if (!num_processors) {
1464		if (boot_cpu_has(X86_FEATURE_APIC)) {
1465			int apicid = boot_cpu_physical_apicid;
1466			int cpu = hard_smp_processor_id();
1467
1468			pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
1469
1470			/* Make sure boot cpu is enumerated */
1471			if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
1472			    apic->apic_id_valid(apicid))
1473				generic_processor_info(apicid, boot_cpu_apic_version);
1474		}
1475
1476		if (!num_processors)
1477			num_processors = 1;
1478	}
1479
1480	i = setup_max_cpus ?: 1;
1481	if (setup_possible_cpus == -1) {
1482		possible = num_processors;
1483#ifdef CONFIG_HOTPLUG_CPU
1484		if (setup_max_cpus)
1485			possible += disabled_cpus;
1486#else
1487		if (possible > i)
1488			possible = i;
1489#endif
1490	} else
1491		possible = setup_possible_cpus;
1492
1493	total_cpus = max_t(int, possible, num_processors + disabled_cpus);
1494
1495	/* nr_cpu_ids could be reduced via nr_cpus= */
1496	if (possible > nr_cpu_ids) {
1497		pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
1498			possible, nr_cpu_ids);
1499		possible = nr_cpu_ids;
1500	}
1501
1502#ifdef CONFIG_HOTPLUG_CPU
1503	if (!setup_max_cpus)
1504#endif
1505	if (possible > i) {
1506		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
1507			possible, setup_max_cpus);
1508		possible = i;
1509	}
1510
1511	nr_cpu_ids = possible;
1512
1513	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
1514		possible, max_t(int, possible - num_processors, 0));
1515
1516	reset_cpu_possible_mask();
1517
1518	for (i = 0; i < possible; i++)
1519		set_cpu_possible(i, true);
1520}
1521
1522#ifdef CONFIG_HOTPLUG_CPU
1523
 1524/* Recompute SMT state for all CPUs when one goes offline */
1525static void recompute_smt_state(void)
1526{
1527	int max_threads, cpu;
1528
1529	max_threads = 0;
1530	for_each_online_cpu (cpu) {
1531		int threads = cpumask_weight(topology_sibling_cpumask(cpu));
1532
1533		if (threads > max_threads)
1534			max_threads = threads;
1535	}
1536	__max_smt_threads = max_threads;
1537}
1538
1539static void remove_siblinginfo(int cpu)
1540{
1541	int sibling;
1542	struct cpuinfo_x86 *c = &cpu_data(cpu);
1543
1544	for_each_cpu(sibling, topology_core_cpumask(cpu)) {
1545		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
 1546		/*
 1547		 * Last thread sibling in this cpu core going down.
1548		 */
1549		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
1550			cpu_data(sibling).booted_cores--;
1551	}
1552
1553	for_each_cpu(sibling, topology_die_cpumask(cpu))
1554		cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));
1555	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
1556		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
1557	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
1558		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
1559	cpumask_clear(cpu_llc_shared_mask(cpu));
1560	cpumask_clear(topology_sibling_cpumask(cpu));
1561	cpumask_clear(topology_core_cpumask(cpu));
1562	cpumask_clear(topology_die_cpumask(cpu));
1563	c->cpu_core_id = 0;
1564	c->booted_cores = 0;
1565	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
1566	recompute_smt_state();
1567}
1568
1569static void remove_cpu_from_maps(int cpu)
1570{
1571	set_cpu_online(cpu, false);
1572	cpumask_clear_cpu(cpu, cpu_callout_mask);
1573	cpumask_clear_cpu(cpu, cpu_callin_mask);
1574	/* was set by cpu_init() */
1575	cpumask_clear_cpu(cpu, cpu_initialized_mask);
1576	numa_remove_cpu(cpu);
1577}
1578
1579void cpu_disable_common(void)
1580{
1581	int cpu = smp_processor_id();
1582
1583	remove_siblinginfo(cpu);
1584
1585	/* It's now safe to remove this processor from the online map */
1586	lock_vector_lock();
1587	remove_cpu_from_maps(cpu);
1588	unlock_vector_lock();
1589	fixup_irqs();
1590	lapic_offline();
1591}
1592
1593int native_cpu_disable(void)
1594{
1595	int ret;
1596
1597	ret = lapic_can_unplug_cpu();
1598	if (ret)
1599		return ret;
1600
1601	cpu_disable_common();
1602
1603        /*
1604         * Disable the local APIC. Otherwise IPI broadcasts will reach
1605         * it. It still responds normally to INIT, NMI, SMI, and SIPI
1606         * messages.
1607         *
1608         * Disabling the APIC must happen after cpu_disable_common()
1609         * which invokes fixup_irqs().
1610         *
1611         * Disabling the APIC preserves already set bits in IRR, but
1612         * an interrupt arriving after disabling the local APIC does not
1613         * set the corresponding IRR bit.
1614         *
1615         * fixup_irqs() scans IRR for set bits so it can raise a not
1616         * yet handled interrupt on the new destination CPU via an IPI
1617         * but obviously it can't do so for IRR bits which are not set.
1618         * IOW, interrupts arriving after disabling the local APIC will
1619         * be lost.
1620         */
1621	apic_soft_disable();
1622
1623	return 0;
1624}
1625
1626int common_cpu_die(unsigned int cpu)
1627{
1628	int ret = 0;
1629
1630	/* We don't do anything here: idle task is faking death itself. */
1631
1632	/* They ack this in play_dead() by setting CPU_DEAD */
1633	if (cpu_wait_death(cpu, 5)) {
1634		if (system_state == SYSTEM_RUNNING)
1635			pr_info("CPU %u is now offline\n", cpu);
1636	} else {
1637		pr_err("CPU %u didn't die...\n", cpu);
1638		ret = -1;
1639	}
1640
1641	return ret;
1642}
1643
1644void native_cpu_die(unsigned int cpu)
1645{
1646	common_cpu_die(cpu);
1647}
1648
1649void play_dead_common(void)
1650{
1651	idle_task_exit();
1652
1653	/* Ack it */
1654	(void)cpu_report_death();
1655
1656	/*
1657	 * With physical CPU hotplug, we should halt the cpu
1658	 */
1659	local_irq_disable();
1660}
1661
1662/**
1663 * cond_wakeup_cpu0 - Wake up CPU0 if needed.
1664 *
1665 * If NMI wants to wake up CPU0, start CPU0.
1666 */
1667void cond_wakeup_cpu0(void)
1668{
1669	if (smp_processor_id() == 0 && enable_start_cpu0)
1670		start_cpu0();
1671}
1672EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);
1673
1674/*
1675 * We need to flush the caches before going to sleep, lest we have
1676 * dirty data in our caches when we come back up.
1677 */
1678static inline void mwait_play_dead(void)
1679{
1680	unsigned int eax, ebx, ecx, edx;
1681	unsigned int highest_cstate = 0;
1682	unsigned int highest_subcstate = 0;
1683	void *mwait_ptr;
1684	int i;
1685
1686	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1687	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
1688		return;
1689	if (!this_cpu_has(X86_FEATURE_MWAIT))
1690		return;
1691	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
1692		return;
1693	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
1694		return;
1695
1696	eax = CPUID_MWAIT_LEAF;
1697	ecx = 0;
1698	native_cpuid(&eax, &ebx, &ecx, &edx);
1699
1700	/*
1701	 * eax will be 0 if EDX enumeration is not valid.
 1702	 * It is initialized below to the (cstate, sub_cstate) value when EDX is valid.
1703	 */
1704	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
1705		eax = 0;
1706	} else {
1707		edx >>= MWAIT_SUBSTATE_SIZE;
1708		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
1709			if (edx & MWAIT_SUBSTATE_MASK) {
1710				highest_cstate = i;
1711				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
1712			}
1713		}
1714		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
1715			(highest_subcstate - 1);
1716	}
1717
1718	/*
1719	 * This should be a memory location in a cache line which is
1720	 * unlikely to be touched by other processors.  The actual
1721	 * content is immaterial as it is not actually modified in any way.
1722	 */
1723	mwait_ptr = &current_thread_info()->flags;
1724
1725	wbinvd();
1726
1727	while (1) {
1728		/*
1729		 * The CLFLUSH is a workaround for erratum AAI65 for
1730		 * the Xeon 7400 series.  It's not clear it is actually
1731		 * needed, but it should be harmless in either case.
1732		 * The WBINVD is insufficient due to the spurious-wakeup
1733		 * case where we return around the loop.
1734		 */
1735		mb();
1736		clflush(mwait_ptr);
1737		mb();
1738		__monitor(mwait_ptr, 0, 0);
1739		mb();
1740		__mwait(eax, 0);
1741
1742		cond_wakeup_cpu0();
1743	}
1744}
1745
1746void hlt_play_dead(void)
1747{
1748	if (__this_cpu_read(cpu_info.x86) >= 4)
1749		wbinvd();
1750
1751	while (1) {
1752		native_halt();
1753
1754		cond_wakeup_cpu0();
1755	}
1756}
1757
1758void native_play_dead(void)
1759{
1760	play_dead_common();
1761	tboot_shutdown(TB_SHUTDOWN_WFS);
1762
1763	mwait_play_dead();	/* Only returns on failure */
1764	if (cpuidle_play_dead())
1765		hlt_play_dead();
1766}
1767
1768#else /* ... !CONFIG_HOTPLUG_CPU */
1769int native_cpu_disable(void)
1770{
1771	return -ENOSYS;
1772}
1773
1774void native_cpu_die(unsigned int cpu)
1775{
1776	/* We said "no" in __cpu_disable */
1777	BUG();
1778}
1779
1780void native_play_dead(void)
1781{
1782	BUG();
1783}
1784
1785#endif
1786
1787#ifdef CONFIG_X86_64
1788/*
1789 * APERF/MPERF frequency ratio computation.
1790 *
1791 * The scheduler wants to do frequency invariant accounting and needs a <1
1792 * ratio to account for the 'current' frequency, corresponding to
1793 * freq_curr / freq_max.
1794 *
 1795 * Since the frequency freq_curr on x86 is controlled by a micro-controller and
1796 * our P-state setting is little more than a request/hint, we need to observe
1797 * the effective frequency 'BusyMHz', i.e. the average frequency over a time
1798 * interval after discarding idle time. This is given by:
1799 *
1800 *   BusyMHz = delta_APERF / delta_MPERF * freq_base
1801 *
1802 * where freq_base is the max non-turbo P-state.
1803 *
1804 * The freq_max term has to be set to a somewhat arbitrary value, because we
1805 * can't know which turbo states will be available at a given point in time:
1806 * it all depends on the thermal headroom of the entire package. We set it to
1807 * the turbo level with 4 cores active.
1808 *
1809 * Benchmarks show that's a good compromise between the 1C turbo ratio
1810 * (freq_curr/freq_max would rarely reach 1) and something close to freq_base,
1811 * which would ignore the entire turbo range (a conspicuous part, making
1812 * freq_curr/freq_max always maxed out).
1813 *
1814 * An exception to the heuristic above is the Atom uarch, where we choose the
 1815 * highest turbo level for freq_max since Atoms are generally oriented towards
1816 * power efficiency.
1817 *
1818 * Setting freq_max to anything less than the 1C turbo ratio makes the ratio
1819 * freq_curr / freq_max to eventually grow >1, in which case we clip it to 1.
1820 */
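/*
 * Illustrative example (made-up numbers): if over one tick delta_APERF is
 * 90000 and delta_MPERF is 100000 with freq_base = 2000 MHz, then
 * BusyMHz = 90000/100000 * 2000 = 1800 MHz. With freq_max set to a 4C turbo
 * level of 2400 MHz, freq_curr/freq_max = 0.75, which is the <1 ratio handed
 * to the scheduler (scaled by SCHED_CAPACITY_SCALE below).
 */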
1821
1822DEFINE_STATIC_KEY_FALSE(arch_scale_freq_key);
1823
1824static DEFINE_PER_CPU(u64, arch_prev_aperf);
1825static DEFINE_PER_CPU(u64, arch_prev_mperf);
1826static u64 arch_turbo_freq_ratio = SCHED_CAPACITY_SCALE;
1827static u64 arch_max_freq_ratio = SCHED_CAPACITY_SCALE;
1828
1829void arch_set_max_freq_ratio(bool turbo_disabled)
1830{
1831	arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
1832					arch_turbo_freq_ratio;
1833}
1834EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);
1835
1836static bool turbo_disabled(void)
1837{
1838	u64 misc_en;
1839	int err;
1840
1841	err = rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_en);
1842	if (err)
1843		return false;
1844
1845	return (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
1846}
1847
1848static bool slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
1849{
1850	int err;
1851
1852	err = rdmsrl_safe(MSR_ATOM_CORE_RATIOS, base_freq);
1853	if (err)
1854		return false;
1855
1856	err = rdmsrl_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq);
1857	if (err)
1858		return false;
1859
1860	*base_freq = (*base_freq >> 16) & 0x3F;     /* max P state */
1861	*turbo_freq = *turbo_freq & 0x3F;           /* 1C turbo    */
1862
1863	return true;
1864}
1865
1866#define X86_MATCH(model)					\
1867	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6,		\
1868		INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL)
1869
1870static const struct x86_cpu_id has_knl_turbo_ratio_limits[] = {
1871	X86_MATCH(XEON_PHI_KNL),
1872	X86_MATCH(XEON_PHI_KNM),
1873	{}
1874};
1875
1876static const struct x86_cpu_id has_skx_turbo_ratio_limits[] = {
1877	X86_MATCH(SKYLAKE_X),
1878	{}
1879};
1880
1881static const struct x86_cpu_id has_glm_turbo_ratio_limits[] = {
1882	X86_MATCH(ATOM_GOLDMONT),
1883	X86_MATCH(ATOM_GOLDMONT_D),
1884	X86_MATCH(ATOM_GOLDMONT_PLUS),
1885	{}
1886};
1887
1888static bool knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
1889				int num_delta_fratio)
1890{
1891	int fratio, delta_fratio, found;
1892	int err, i;
1893	u64 msr;
1894
1895	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
1896	if (err)
1897		return false;
1898
1899	*base_freq = (*base_freq >> 8) & 0xFF;	    /* max P state */
1900
1901	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
1902	if (err)
1903		return false;
1904
1905	fratio = (msr >> 8) & 0xFF;
1906	i = 16;
1907	found = 0;
1908	do {
1909		if (found >= num_delta_fratio) {
1910			*turbo_freq = fratio;
1911			return true;
1912		}
1913
1914		delta_fratio = (msr >> (i + 5)) & 0x7;
1915
1916		if (delta_fratio) {
1917			found += 1;
1918			fratio -= delta_fratio;
1919		}
1920
1921		i += 8;
1922	} while (i < 64);
1923
1924	return true;
1925}
1926
1927static bool skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int size)
1928{
1929	u64 ratios, counts;
1930	u32 group_size;
1931	int err, i;
1932
1933	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
1934	if (err)
1935		return false;
1936
1937	*base_freq = (*base_freq >> 8) & 0xFF;      /* max P state */
1938
1939	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &ratios);
1940	if (err)
1941		return false;
1942
1943	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT1, &counts);
1944	if (err)
1945		return false;
1946
1947	for (i = 0; i < 64; i += 8) {
1948		group_size = (counts >> i) & 0xFF;
1949		if (group_size >= size) {
1950			*turbo_freq = (ratios >> i) & 0xFF;
1951			return true;
1952		}
1953	}
1954
1955	return false;
1956}
1957
1958static bool core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
1959{
1960	u64 msr;
1961	int err;
1962
1963	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
1964	if (err)
1965		return false;
1966
1967	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
1968	if (err)
1969		return false;
1970
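	/*
	 * Illustrative layout: MSR_PLATFORM_INFO bits 15:8 hold the maximum
	 * non-turbo ratio, MSR_TURBO_RATIO_LIMIT bits 7:0 the 1-core and
	 * bits 31:24 the 4-core turbo ratio, all in units of the 100 MHz
	 * bus clock (e.g. 0x24 = 36 = 3.6 GHz).
	 */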
1971	*base_freq = (*base_freq >> 8) & 0xFF;    /* max P state */
1972	*turbo_freq = (msr >> 24) & 0xFF;         /* 4C turbo    */
1973
1974	/* The CPU may have less than 4 cores */
1975	if (!*turbo_freq)
1976		*turbo_freq = msr & 0xFF;         /* 1C turbo    */
1977
1978	return true;
1979}
1980
1981static bool intel_set_max_freq_ratio(void)
1982{
1983	u64 base_freq, turbo_freq;
1984	u64 turbo_ratio;
1985
1986	if (slv_set_max_freq_ratio(&base_freq, &turbo_freq))
1987		goto out;
1988
1989	if (x86_match_cpu(has_glm_turbo_ratio_limits) &&
1990	    skx_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
1991		goto out;
1992
1993	if (x86_match_cpu(has_knl_turbo_ratio_limits) &&
1994	    knl_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
1995		goto out;
1996
1997	if (x86_match_cpu(has_skx_turbo_ratio_limits) &&
1998	    skx_set_max_freq_ratio(&base_freq, &turbo_freq, 4))
1999		goto out;
2000
2001	if (core_set_max_freq_ratio(&base_freq, &turbo_freq))
2002		goto out;
2003
2004	return false;
2005
2006out:
2007	/*
2008	 * Some hypervisors advertise X86_FEATURE_APERFMPERF
 2009	 * but then fill all MSRs with zeroes.
2010	 * Some CPUs have turbo boost but don't declare any turbo ratio
2011	 * in MSR_TURBO_RATIO_LIMIT.
2012	 */
2013	if (!base_freq || !turbo_freq) {
2014		pr_debug("Couldn't determine cpu base or turbo frequency, necessary for scale-invariant accounting.\n");
2015		return false;
2016	}
2017
2018	turbo_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE, base_freq);
2019	if (!turbo_ratio) {
2020		pr_debug("Non-zero turbo and base frequencies led to a 0 ratio.\n");
2021		return false;
2022	}
2023
2024	arch_turbo_freq_ratio = turbo_ratio;
2025	arch_set_max_freq_ratio(turbo_disabled());
2026
2027	return true;
2028}
2029
2030#ifdef CONFIG_ACPI_CPPC_LIB
2031static bool amd_set_max_freq_ratio(void)
2032{
2033	struct cppc_perf_caps perf_caps;
2034	u64 highest_perf, nominal_perf;
2035	u64 perf_ratio;
2036	int rc;
2037
2038	rc = cppc_get_perf_caps(0, &perf_caps);
2039	if (rc) {
2040		pr_debug("Could not retrieve perf counters (%d)\n", rc);
2041		return false;
2042	}
2043
2044	highest_perf = amd_get_highest_perf();
2045	nominal_perf = perf_caps.nominal_perf;
2046
2047	if (!highest_perf || !nominal_perf) {
2048		pr_debug("Could not retrieve highest or nominal performance\n");
2049		return false;
2050	}
2051
2052	perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
2053	/* midpoint between max_boost and max_P */
2054	perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
2055	if (!perf_ratio) {
2056		pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
2057		return false;
2058	}
2059
2060	arch_turbo_freq_ratio = perf_ratio;
2061	arch_set_max_freq_ratio(false);
2062
2063	return true;
2064}
2065#else
2066static bool amd_set_max_freq_ratio(void)
2067{
2068	return false;
2069}
2070#endif
2071
2072static void init_counter_refs(void)
2073{
2074	u64 aperf, mperf;
2075
2076	rdmsrl(MSR_IA32_APERF, aperf);
2077	rdmsrl(MSR_IA32_MPERF, mperf);
2078
2079	this_cpu_write(arch_prev_aperf, aperf);
2080	this_cpu_write(arch_prev_mperf, mperf);
2081}
2082
2083#ifdef CONFIG_PM_SLEEP
2084static struct syscore_ops freq_invariance_syscore_ops = {
2085	.resume = init_counter_refs,
2086};
2087
2088static void register_freq_invariance_syscore_ops(void)
2089{
2090	/* Bail out if registered already. */
2091	if (freq_invariance_syscore_ops.node.prev)
2092		return;
2093
2094	register_syscore_ops(&freq_invariance_syscore_ops);
2095}
2096#else
2097static inline void register_freq_invariance_syscore_ops(void) {}
2098#endif
2099
2100static void init_freq_invariance(bool secondary, bool cppc_ready)
2101{
2102	bool ret = false;
2103
2104	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
2105		return;
2106
2107	if (secondary) {
2108		if (static_branch_likely(&arch_scale_freq_key)) {
2109			init_counter_refs();
2110		}
2111		return;
2112	}
2113
2114	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2115		ret = intel_set_max_freq_ratio();
2116	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
2117		if (!cppc_ready) {
2118			return;
2119		}
2120		ret = amd_set_max_freq_ratio();
2121	}
2122
2123	if (ret) {
2124		init_counter_refs();
2125		static_branch_enable(&arch_scale_freq_key);
2126		register_freq_invariance_syscore_ops();
2127		pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
2128	} else {
2129		pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
2130	}
2131}
2132
2133#ifdef CONFIG_ACPI_CPPC_LIB
2134static DEFINE_MUTEX(freq_invariance_lock);
2135
2136void init_freq_invariance_cppc(void)
2137{
2138	static bool secondary;
2139
2140	mutex_lock(&freq_invariance_lock);
2141
2142	init_freq_invariance(secondary, true);
2143	secondary = true;
2144
2145	mutex_unlock(&freq_invariance_lock);
2146}
2147#endif
2148
2149static void disable_freq_invariance_workfn(struct work_struct *work)
2150{
2151	static_branch_disable(&arch_scale_freq_key);
2152}
2153
2154static DECLARE_WORK(disable_freq_invariance_work,
2155		    disable_freq_invariance_workfn);
2156
2157DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
2158
2159void arch_scale_freq_tick(void)
2160{
2161	u64 freq_scale = SCHED_CAPACITY_SCALE;
2162	u64 aperf, mperf;
2163	u64 acnt, mcnt;
2164
2165	if (!arch_scale_freq_invariant())
2166		return;
2167
2168	rdmsrl(MSR_IA32_APERF, aperf);
2169	rdmsrl(MSR_IA32_MPERF, mperf);
2170
2171	acnt = aperf - this_cpu_read(arch_prev_aperf);
2172	mcnt = mperf - this_cpu_read(arch_prev_mperf);
2173
2174	this_cpu_write(arch_prev_aperf, aperf);
2175	this_cpu_write(arch_prev_mperf, mperf);
2176
2177	if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
2178		goto error;
2179
2180	if (check_mul_overflow(mcnt, arch_max_freq_ratio, &mcnt) || !mcnt)
2181		goto error;
2182
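	/*
	 * Fixed-point sketch: acnt was scaled by 2^(2*SCHED_CAPACITY_SHIFT)
	 * and mcnt by arch_max_freq_ratio (itself in SCHED_CAPACITY_SCALE
	 * units), so the division below yields freq_curr/freq_max scaled by
	 * SCHED_CAPACITY_SCALE. E.g. acnt=90000, mcnt=100000, ratio=1229:
	 * (90000 << 20) / (100000 * 1229) ~= 767, i.e. ~75% of freq_max.
	 */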
2183	freq_scale = div64_u64(acnt, mcnt);
2184	if (!freq_scale)
2185		goto error;
2186
2187	if (freq_scale > SCHED_CAPACITY_SCALE)
2188		freq_scale = SCHED_CAPACITY_SCALE;
2189
2190	this_cpu_write(arch_freq_scale, freq_scale);
2191	return;
2192
2193error:
2194	pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
2195	schedule_work(&disable_freq_invariance_work);
2196}
2197#else
2198static inline void init_freq_invariance(bool secondary, bool cppc_ready)
2199{
2200}
2201#endif /* CONFIG_X86_64 */