v4.17
   1 /*
   2 *	x86 SMP booting functions
   3 *
   4 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
   5 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
   6 *	Copyright 2001 Andi Kleen, SuSE Labs.
   7 *
   8 *	Much of the core SMP work is based on previous work by Thomas Radke, to
   9 *	whom a great many thanks are extended.
  10 *
  11 *	Thanks to Intel for making available several different Pentium,
  12 *	Pentium Pro and Pentium-II/Xeon MP machines.
  13 *	Original development of Linux SMP code supported by Caldera.
  14 *
  15 *	This code is released under the GNU General Public License version 2 or
  16 *	later.
  17 *
  18 *	Fixes
  19 *		Felix Koop	:	NR_CPUS used properly
  20 *		Jose Renau	:	Handle single CPU case.
  21 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
  22 *		Greg Wright	:	Fix for kernel stacks panic.
  23 *		Erich Boleyn	:	MP v1.4 and additional changes.
  24 *	Matthias Sattler	:	Changes for 2.1 kernel map.
  25 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
  26 *	Michael Chastain	:	Change trampoline.S to gnu as.
  27 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
  28 *		Ingo Molnar	:	Added APIC timers, based on code
  29 *					from Jose Renau
  30 *		Ingo Molnar	:	various cleanups and rewrites
  31 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
  32 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
  33 *	Andi Kleen		:	Changed for SMP boot into long mode.
  34 *		Martin J. Bligh	: 	Added support for multi-quad systems
  35 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
  36 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
  37 *      Andi Kleen              :       Converted to new state machine.
  38 *	Ashok Raj		: 	CPU hotplug support
  39 *	Glauber Costa		:	i386 and x86_64 integration
  40 */
  41
  42#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  43
  44#include <linux/init.h>
  45#include <linux/smp.h>
  46#include <linux/export.h>
  47#include <linux/sched.h>
  48#include <linux/sched/topology.h>
  49#include <linux/sched/hotplug.h>
  50#include <linux/sched/task_stack.h>
  51#include <linux/percpu.h>
  52#include <linux/bootmem.h>
  53#include <linux/err.h>
  54#include <linux/nmi.h>
  55#include <linux/tboot.h>
  56#include <linux/stackprotector.h>
  57#include <linux/gfp.h>
  58#include <linux/cpuidle.h>
  59
  60#include <asm/acpi.h>
  61#include <asm/desc.h>
  62#include <asm/nmi.h>
  63#include <asm/irq.h>
  64#include <asm/realmode.h>
  65#include <asm/cpu.h>
  66#include <asm/numa.h>
  67#include <asm/pgtable.h>
  68#include <asm/tlbflush.h>
  69#include <asm/mtrr.h>
  70#include <asm/mwait.h>
  71#include <asm/apic.h>
  72#include <asm/io_apic.h>
  73#include <asm/fpu/internal.h>
  74#include <asm/setup.h>
  75#include <asm/uv/uv.h>
  76#include <linux/mc146818rtc.h>
  77#include <asm/i8259.h>
  78#include <asm/misc.h>
  79#include <asm/qspinlock.h>
  80#include <asm/intel-family.h>
  81#include <asm/cpu_device_id.h>
  82#include <asm/spec-ctrl.h>
  83
  84/* Number of siblings per CPU package */
  85int smp_num_siblings = 1;
  86EXPORT_SYMBOL(smp_num_siblings);
  87
  88/* Last level cache ID of each logical CPU */
  89DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
  90
  91/* representing HT siblings of each logical CPU */
  92DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
  93EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  94
  95/* representing HT and core siblings of each logical CPU */
  96DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
  97EXPORT_PER_CPU_SYMBOL(cpu_core_map);
  98
  99DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 100
 101/* Per CPU bogomips and other parameters */
 102DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 103EXPORT_PER_CPU_SYMBOL(cpu_info);
 104
 105/* Logical package management. We might want to allocate that dynamically */
 106unsigned int __max_logical_packages __read_mostly;
 107EXPORT_SYMBOL(__max_logical_packages);
 108static unsigned int logical_packages __read_mostly;
 109
 110/* Maximum number of SMT threads on any online core */
 111int __read_mostly __max_smt_threads = 1;
 112
 113/* Flag to indicate if a complete sched domain rebuild is required */
 114bool x86_topology_update;
 115
 116int arch_update_cpu_topology(void)
 117{
 118	int retval = x86_topology_update;
 119
 120	x86_topology_update = false;
 121	return retval;
 122}
 123
 124static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 125{
 126	unsigned long flags;
 127
 128	spin_lock_irqsave(&rtc_lock, flags);
 129	CMOS_WRITE(0xa, 0xf);
 130	spin_unlock_irqrestore(&rtc_lock, flags);
 131	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
 132							start_eip >> 4;
 133	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
 134							start_eip & 0xf;
 135}
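/*
 * Worked example (editor's illustration, not part of the original file):
 * the warm-reset vector is stored as a real-mode segment:offset pair, so
 * with start_eip == 0x9a000 the code above writes 0x9a000 >> 4 == 0x9a00
 * (segment) to TRAMPOLINE_PHYS_HIGH and 0x9a000 & 0xf == 0x0 (offset) to
 * TRAMPOLINE_PHYS_LOW, making a warm reset resume at 9a00:0000, i.e.
 * physical 0x9a000.
 */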
 136
 137static inline void smpboot_restore_warm_reset_vector(void)
 138{
 139	unsigned long flags;
 140
 141	/*
 142	 * Paranoid:  Set warm reset code and vector here back
 143	 * to default values.
 144	 */
 145	spin_lock_irqsave(&rtc_lock, flags);
 146	CMOS_WRITE(0, 0xf);
 147	spin_unlock_irqrestore(&rtc_lock, flags);
 148
 149	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
 150}
 151
 152/*
 153 * Report back to the Boot Processor during boot time or to the caller processor
 154 * during CPU online.
 155 */
 156static void smp_callin(void)
 157{
 158	int cpuid, phys_id;
 159
 160	/*
  161	 * If woken up by an INIT in an 82489DX configuration
 162	 * cpu_callout_mask guarantees we don't get here before
 163	 * an INIT_deassert IPI reaches our local APIC, so it is
 164	 * now safe to touch our local APIC.
 165	 */
 166	cpuid = smp_processor_id();
 167
 168	/*
 169	 * (This works even if the APIC is not enabled.)
 170	 */
 171	phys_id = read_apic_id();
 172
 173	/*
 174	 * the boot CPU has finished the init stage and is spinning
 175	 * on callin_map until we finish. We are free to set up this
 176	 * CPU, first the APIC. (this is probably redundant on most
 177	 * boards)
 178	 */
 179	apic_ap_setup();
 180
 181	/*
 182	 * Save our processor parameters. Note: this information
 183	 * is needed for clock calibration.
 184	 */
 185	smp_store_cpu_info(cpuid);
 186
 187	/*
 188	 * The topology information must be up to date before
 189	 * calibrate_delay() and notify_cpu_starting().
 190	 */
 191	set_cpu_sibling_map(raw_smp_processor_id());
 192
 193	/*
 194	 * Get our bogomips.
 195	 * Update loops_per_jiffy in cpu_data. Previous call to
 196	 * smp_store_cpu_info() stored a value that is close but not as
 197	 * accurate as the value just calculated.
 198	 */
 199	calibrate_delay();
 200	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
 201	pr_debug("Stack at about %p\n", &cpuid);
 202
 203	wmb();
 204
 205	notify_cpu_starting(cpuid);
 206
 207	/*
 208	 * Allow the master to continue.
 209	 */
 210	cpumask_set_cpu(cpuid, cpu_callin_mask);
 211}
 212
 213static int cpu0_logical_apicid;
 214static int enable_start_cpu0;
 215/*
 216 * Activate a secondary processor.
 217 */
 218static void notrace start_secondary(void *unused)
 219{
 220	/*
 221	 * Don't put *anything* except direct CPU state initialization
  222	 * before cpu_init(); SMP booting is so fragile that we want to
 223	 * limit the things done here to the most necessary things.
 224	 */
 225	if (boot_cpu_has(X86_FEATURE_PCID))
 226		__write_cr4(__read_cr4() | X86_CR4_PCIDE);
 227
 228#ifdef CONFIG_X86_32
 229	/* switch away from the initial page table */
 230	load_cr3(swapper_pg_dir);
 231	__flush_tlb_all();
 232#endif
 233	load_current_idt();
 234	cpu_init();
 235	x86_cpuinit.early_percpu_clock_init();
 236	preempt_disable();
 237	smp_callin();
 238
 239	enable_start_cpu0 = 0;
 240
 241	/* otherwise gcc will move up smp_processor_id before the cpu_init */
 242	barrier();
 243	/*
 244	 * Check TSC synchronization with the boot CPU:
 245	 */
 246	check_tsc_sync_target();
 247
 248	speculative_store_bypass_ht_init();
 249
 250	/*
 251	 * Lock vector_lock, set CPU online and bring the vector
 252	 * allocator online. Online must be set with vector_lock held
 253	 * to prevent a concurrent irq setup/teardown from seeing a
 254	 * half valid vector space.
 255	 */
 256	lock_vector_lock();
 257	set_cpu_online(smp_processor_id(), true);
 258	lapic_online();
 259	unlock_vector_lock();
 260	cpu_set_state_online(smp_processor_id());
 261	x86_platform.nmi_init();
 262
 263	/* enable local interrupts */
 264	local_irq_enable();
 265
 266	/* to prevent fake stack check failure in clock setup */
 267	boot_init_stack_canary();
 268
 269	x86_cpuinit.setup_percpu_clockev();
 270
 271	wmb();
 272	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 273}
 274
 275/**
  276 * topology_phys_to_logical_pkg - Map a physical package id to a logical package id
 277 *
 278 * Returns logical package id or -1 if not found
 279 */
 280int topology_phys_to_logical_pkg(unsigned int phys_pkg)
 281{
 282	int cpu;
 283
 284	for_each_possible_cpu(cpu) {
 285		struct cpuinfo_x86 *c = &cpu_data(cpu);
 286
 287		if (c->initialized && c->phys_proc_id == phys_pkg)
 288			return c->logical_proc_id;
 289	}
 290	return -1;
 291}
 292EXPORT_SYMBOL(topology_phys_to_logical_pkg);
 293
 294/**
 295 * topology_update_package_map - Update the physical to logical package map
 296 * @pkg:	The physical package id as retrieved via CPUID
 297 * @cpu:	The cpu for which this is updated
 298 */
 299int topology_update_package_map(unsigned int pkg, unsigned int cpu)
 300{
 301	int new;
 302
 303	/* Already available somewhere? */
 304	new = topology_phys_to_logical_pkg(pkg);
 305	if (new >= 0)
 306		goto found;
 307
 308	new = logical_packages++;
 309	if (new != pkg) {
 310		pr_info("CPU %u Converting physical %u to logical package %u\n",
 311			cpu, pkg, new);
 312	}
 313found:
 314	cpu_data(cpu).logical_proc_id = new;
 315	return 0;
 316}
 317
 318void __init smp_store_boot_cpu_info(void)
 319{
 320	int id = 0; /* CPU 0 */
 321	struct cpuinfo_x86 *c = &cpu_data(id);
 322
 323	*c = boot_cpu_data;
 324	c->cpu_index = id;
 325	topology_update_package_map(c->phys_proc_id, id);
 326	c->initialized = true;
 327}
 328
 329/*
 330 * The bootstrap kernel entry code has set these up. Save them for
  331 * a given CPU.
 332 */
 333void smp_store_cpu_info(int id)
 334{
 335	struct cpuinfo_x86 *c = &cpu_data(id);
 336
 337	/* Copy boot_cpu_data only on the first bringup */
 338	if (!c->initialized)
 339		*c = boot_cpu_data;
 340	c->cpu_index = id;
 341	/*
 342	 * During boot time, CPU0 has this setup already. Save the info when
  343	 * bringing up an AP or an offlined CPU0.
 344	 */
 345	identify_secondary_cpu(c);
 346	c->initialized = true;
 347}
 348
 349static bool
 350topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 351{
 352	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 353
 354	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
 355}
 356
 357static bool
 358topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
 359{
 360	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 361
 362	return !WARN_ONCE(!topology_same_node(c, o),
 363		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
 364		"[node: %d != %d]. Ignoring dependency.\n",
 365		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
 366}
 367
 368#define link_mask(mfunc, c1, c2)					\
 369do {									\
 370	cpumask_set_cpu((c1), mfunc(c2));				\
 371	cpumask_set_cpu((c2), mfunc(c1));				\
 372} while (0)
 373
 374static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 375{
 376	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 377		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 378
 379		if (c->phys_proc_id == o->phys_proc_id &&
 380		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
 381			if (c->cpu_core_id == o->cpu_core_id)
 382				return topology_sane(c, o, "smt");
 383
 384			if ((c->cu_id != 0xff) &&
 385			    (o->cu_id != 0xff) &&
 386			    (c->cu_id == o->cu_id))
 387				return topology_sane(c, o, "smt");
 388		}
 389
 390	} else if (c->phys_proc_id == o->phys_proc_id &&
 391		   c->cpu_core_id == o->cpu_core_id) {
 392		return topology_sane(c, o, "smt");
 393	}
 394
 395	return false;
 396}
 397
 398/*
 399 * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
 400 *
 401 * These are Intel CPUs that enumerate an LLC that is shared by
 402 * multiple NUMA nodes. The LLC on these systems is shared for
 403 * off-package data access but private to the NUMA node (half
 404 * of the package) for on-package access.
 405 *
 406 * CPUID (the source of the information about the LLC) can only
 407 * enumerate the cache as being shared *or* unshared, but not
 408 * this particular configuration. The CPU in this case enumerates
 409 * the cache to be shared across the entire package (spanning both
 410 * NUMA nodes).
 411 */
 412
 413static const struct x86_cpu_id snc_cpu[] = {
 414	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X },
 415	{}
 416};
 417
 418static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 419{
 420	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 421
 422	/* Do not match if we do not have a valid APICID for cpu: */
 423	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
 424		return false;
 425
 426	/* Do not match if LLC id does not match: */
 427	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
 428		return false;
 429
 430	/*
 431	 * Allow the SNC topology without warning. Return of false
 432	 * means 'c' does not share the LLC of 'o'. This will be
 433	 * reflected to userspace.
 434	 */
 435	if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
 436		return false;
 437
 438	return topology_sane(c, o, "llc");
 439}
 440
 441/*
 442 * Unlike the other levels, we do not enforce keeping a
 443 * multicore group inside a NUMA node.  If this happens, we will
 444 * discard the MC level of the topology later.
 445 */
 446static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 447{
 448	if (c->phys_proc_id == o->phys_proc_id)
 449		return true;
 450	return false;
 451}
 452
 453#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
 454static inline int x86_sched_itmt_flags(void)
 455{
 456	return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
 457}
 458
 459#ifdef CONFIG_SCHED_MC
 460static int x86_core_flags(void)
 461{
 462	return cpu_core_flags() | x86_sched_itmt_flags();
 463}
 464#endif
 465#ifdef CONFIG_SCHED_SMT
 466static int x86_smt_flags(void)
 467{
 468	return cpu_smt_flags() | x86_sched_itmt_flags();
 469}
 470#endif
 471#endif
 472
 473static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
 474#ifdef CONFIG_SCHED_SMT
 475	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
 476#endif
 477#ifdef CONFIG_SCHED_MC
 478	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
 479#endif
 480	{ NULL, },
 481};
 482
 483static struct sched_domain_topology_level x86_topology[] = {
 484#ifdef CONFIG_SCHED_SMT
 485	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
 486#endif
 487#ifdef CONFIG_SCHED_MC
 488	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
 489#endif
 490	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 491	{ NULL, },
 492};
 493
 494/*
 495 * Set if a package/die has multiple NUMA nodes inside.
 496 * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
 497 * Sub-NUMA Clustering have this.
 498 */
 499static bool x86_has_numa_in_package;
 500
 501void set_cpu_sibling_map(int cpu)
 502{
 503	bool has_smt = smp_num_siblings > 1;
 504	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
 505	struct cpuinfo_x86 *c = &cpu_data(cpu);
 506	struct cpuinfo_x86 *o;
 507	int i, threads;
 508
 509	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 510
 511	if (!has_mp) {
 512		cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
 513		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 514		cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
 515		c->booted_cores = 1;
 516		return;
 517	}
 518
 519	for_each_cpu(i, cpu_sibling_setup_mask) {
 520		o = &cpu_data(i);
 521
 522		if ((i == cpu) || (has_smt && match_smt(c, o)))
 523			link_mask(topology_sibling_cpumask, cpu, i);
 524
 525		if ((i == cpu) || (has_mp && match_llc(c, o)))
 526			link_mask(cpu_llc_shared_mask, cpu, i);
 527
 528	}
 529
 530	/*
 531	 * This needs a separate iteration over the cpus because we rely on all
 532	 * topology_sibling_cpumask links to be set-up.
 533	 */
 534	for_each_cpu(i, cpu_sibling_setup_mask) {
 535		o = &cpu_data(i);
 536
 537		if ((i == cpu) || (has_mp && match_die(c, o))) {
 538			link_mask(topology_core_cpumask, cpu, i);
 539
 540			/*
  541			 *  Does this new cpu bring up a new core?
 542			 */
 543			if (cpumask_weight(
 544			    topology_sibling_cpumask(cpu)) == 1) {
 545				/*
 546				 * for each core in package, increment
 547				 * the booted_cores for this new cpu
 548				 */
 549				if (cpumask_first(
 550				    topology_sibling_cpumask(i)) == i)
 551					c->booted_cores++;
 552				/*
 553				 * increment the core count for all
 554				 * the other cpus in this package
 555				 */
 556				if (i != cpu)
 557					cpu_data(i).booted_cores++;
 558			} else if (i != cpu && !c->booted_cores)
 559				c->booted_cores = cpu_data(i).booted_cores;
 560		}
 561		if (match_die(c, o) && !topology_same_node(c, o))
 562			x86_has_numa_in_package = true;
 563	}
 564
 565	threads = cpumask_weight(topology_sibling_cpumask(cpu));
 566	if (threads > __max_smt_threads)
 567		__max_smt_threads = threads;
 568}
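/*
 * Worked example (editor's illustration): one package, two SMT-2 cores
 * (core 0 = CPUs 0,1; core 1 = CPUs 2,3). Onlining CPU 2 after CPUs 0
 * and 1 takes the sibling-weight == 1 branch above: it counts one booted
 * core per first thread it encounters (CPU 0 and itself), ending with
 * booted_cores == 2, and bumps booted_cores of CPUs 0 and 1 from 1 to 2.
 * When CPU 3 comes up later its sibling weight is 2, so it just copies
 * booted_cores from an already-online CPU in the package.
 */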
 569
 570/* maps the cpu to the sched domain representing multi-core */
 571const struct cpumask *cpu_coregroup_mask(int cpu)
 572{
 573	return cpu_llc_shared_mask(cpu);
 574}
 575
 576static void impress_friends(void)
 577{
 578	int cpu;
 579	unsigned long bogosum = 0;
 580	/*
 581	 * Allow the user to impress friends.
 582	 */
 583	pr_debug("Before bogomips\n");
 584	for_each_possible_cpu(cpu)
 585		if (cpumask_test_cpu(cpu, cpu_callout_mask))
 586			bogosum += cpu_data(cpu).loops_per_jiffy;
 587	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
 588		num_online_cpus(),
 589		bogosum/(500000/HZ),
 590		(bogosum/(5000/HZ))%100);
 591
 592	pr_debug("Before bogocount - setting activated=1\n");
 593}
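/*
 * Editor's worked example of the arithmetic above (illustration only):
 * BogoMIPS per CPU is loops_per_jiffy * HZ / 500000. With HZ == 1000 and
 * four CPUs each reporting loops_per_jiffy == 4000000, bogosum is
 * 16000000; the integer part is bogosum / (500000/HZ) == 32000 and the
 * fractional digits are (bogosum / (5000/HZ)) % 100 == 0, so the line
 * reads "Total of 4 processors activated (32000.00 BogoMIPS)".
 */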
 594
 595void __inquire_remote_apic(int apicid)
 596{
 597	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
 598	const char * const names[] = { "ID", "VERSION", "SPIV" };
 599	int timeout;
 600	u32 status;
 601
 602	pr_info("Inquiring remote APIC 0x%x...\n", apicid);
 603
 604	for (i = 0; i < ARRAY_SIZE(regs); i++) {
 605		pr_info("... APIC 0x%x %s: ", apicid, names[i]);
 606
 607		/*
 608		 * Wait for idle.
 609		 */
 610		status = safe_apic_wait_icr_idle();
 611		if (status)
 612			pr_cont("a previous APIC delivery may have failed\n");
 613
 614		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
 615
 616		timeout = 0;
 617		do {
 618			udelay(100);
 619			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
 620		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
 621
 622		switch (status) {
 623		case APIC_ICR_RR_VALID:
 624			status = apic_read(APIC_RRR);
 625			pr_cont("%08x\n", status);
 626			break;
 627		default:
 628			pr_cont("failed\n");
 629		}
 630	}
 631}
 632
 633/*
 634 * The Multiprocessor Specification 1.4 (1997) example code suggests
 635 * that there should be a 10ms delay between the BSP asserting INIT
 636 * and de-asserting INIT, when starting a remote processor.
 637 * But that slows boot and resume on modern processors, which include
 638 * many cores and don't require that delay.
 639 *
  640 * Cmdline "cpu_init_udelay=" is available to override this delay.
 641 * Modern processor families are quirked to remove the delay entirely.
 642 */
 643#define UDELAY_10MS_DEFAULT 10000
 644
 645static unsigned int init_udelay = UINT_MAX;
 646
 647static int __init cpu_init_udelay(char *str)
 648{
 649	get_option(&str, &init_udelay);
 650
 651	return 0;
 652}
 653early_param("cpu_init_udelay", cpu_init_udelay);
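/*
 * Usage sketch (editor's note, not from the original source): the value
 * is a plain microsecond count parsed by get_option(), so booting with
 * "cpu_init_udelay=10000" restores the MP-spec 10ms INIT delay on a
 * quirked machine, while "cpu_init_udelay=0" drops it on a legacy one.
 */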
 654
 655static void __init smp_quirk_init_udelay(void)
 656{
 657	/* if cmdline changed it from default, leave it alone */
 658	if (init_udelay != UINT_MAX)
 659		return;
 660
 661	/* if modern processor, use no delay */
 662	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
 663	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
 664		init_udelay = 0;
 665		return;
 666	}
 667	/* else, use legacy delay */
 668	init_udelay = UDELAY_10MS_DEFAULT;
 669}
 670
 671/*
 672 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 673 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 674 * won't ... remember to clear down the APIC, etc later.
 675 */
 676int
 677wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
 678{
 679	unsigned long send_status, accept_status = 0;
 680	int maxlvt;
 681
 682	/* Target chip */
 683	/* Boot on the stack */
 684	/* Kick the second */
 685	apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid);
 686
 687	pr_debug("Waiting for send to finish...\n");
 688	send_status = safe_apic_wait_icr_idle();
 689
 690	/*
 691	 * Give the other CPU some time to accept the IPI.
 692	 */
 693	udelay(200);
 694	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
 695		maxlvt = lapic_get_maxlvt();
 696		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
 697			apic_write(APIC_ESR, 0);
 698		accept_status = (apic_read(APIC_ESR) & 0xEF);
 699	}
 700	pr_debug("NMI sent\n");
 701
 702	if (send_status)
 703		pr_err("APIC never delivered???\n");
 704	if (accept_status)
 705		pr_err("APIC delivery error (%lx)\n", accept_status);
 706
 707	return (send_status | accept_status);
 708}
 709
 710static int
 711wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 712{
 713	unsigned long send_status = 0, accept_status = 0;
 714	int maxlvt, num_starts, j;
 715
 716	maxlvt = lapic_get_maxlvt();
 717
 718	/*
 719	 * Be paranoid about clearing APIC errors.
 720	 */
 721	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
 722		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 723			apic_write(APIC_ESR, 0);
 724		apic_read(APIC_ESR);
 725	}
 726
 727	pr_debug("Asserting INIT\n");
 728
 729	/*
 730	 * Turn INIT on target chip
 731	 */
 732	/*
 733	 * Send IPI
 734	 */
 735	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
 736		       phys_apicid);
 737
 738	pr_debug("Waiting for send to finish...\n");
 739	send_status = safe_apic_wait_icr_idle();
 740
 741	udelay(init_udelay);
 742
 743	pr_debug("Deasserting INIT\n");
 744
 745	/* Target chip */
 746	/* Send IPI */
 747	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
 748
 749	pr_debug("Waiting for send to finish...\n");
 750	send_status = safe_apic_wait_icr_idle();
 751
 752	mb();
 753
 754	/*
 755	 * Should we send STARTUP IPIs ?
 756	 *
 757	 * Determine this based on the APIC version.
 758	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
 759	 */
 760	if (APIC_INTEGRATED(boot_cpu_apic_version))
 761		num_starts = 2;
 762	else
 763		num_starts = 0;
 764
 765	/*
 766	 * Run STARTUP IPI loop.
 767	 */
 768	pr_debug("#startup loops: %d\n", num_starts);
 769
 770	for (j = 1; j <= num_starts; j++) {
 771		pr_debug("Sending STARTUP #%d\n", j);
 772		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 773			apic_write(APIC_ESR, 0);
 774		apic_read(APIC_ESR);
 775		pr_debug("After apic_write\n");
 776
 777		/*
 778		 * STARTUP IPI
 779		 */
 780
 781		/* Target chip */
 782		/* Boot on the stack */
 783		/* Kick the second */
 784		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
 785			       phys_apicid);
 786
 787		/*
 788		 * Give the other CPU some time to accept the IPI.
 789		 */
 790		if (init_udelay == 0)
 791			udelay(10);
 792		else
 793			udelay(300);
 794
 795		pr_debug("Startup point 1\n");
 796
 797		pr_debug("Waiting for send to finish...\n");
 798		send_status = safe_apic_wait_icr_idle();
 799
 800		/*
 801		 * Give the other CPU some time to accept the IPI.
 802		 */
 803		if (init_udelay == 0)
 804			udelay(10);
 805		else
 806			udelay(200);
 807
 808		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 809			apic_write(APIC_ESR, 0);
 810		accept_status = (apic_read(APIC_ESR) & 0xEF);
 811		if (send_status || accept_status)
 812			break;
 813	}
 814	pr_debug("After Startup\n");
 815
 816	if (send_status)
 817		pr_err("APIC never delivered???\n");
 818	if (accept_status)
 819		pr_err("APIC delivery error (%lx)\n", accept_status);
 820
 821	return (send_status | accept_status);
 822}
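/*
 * Worked example (editor's illustration): the STARTUP IPI above carries
 * the 4 KiB page number of the real-mode entry point, so with
 * start_eip == 0x9a000 the vector field is 0x9a000 >> 12 == 0x9a and the
 * AP begins execution at physical 0x9a000, matching the warm-reset
 * example earlier in this file.
 */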
 823
 824/* reduce the number of lines printed when booting a large cpu count system */
 825static void announce_cpu(int cpu, int apicid)
 826{
 827	static int current_node = -1;
 828	int node = early_cpu_to_node(cpu);
 829	static int width, node_width;
 830
 831	if (!width)
 832		width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */
 833
 834	if (!node_width)
 835		node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */
 836
 837	if (cpu == 1)
 838		printk(KERN_INFO "x86: Booting SMP configuration:\n");
 839
 840	if (system_state < SYSTEM_RUNNING) {
 841		if (node != current_node) {
 842			if (current_node > (-1))
 843				pr_cont("\n");
 844			current_node = node;
 845
 846			printk(KERN_INFO ".... node %*s#%d, CPUs:  ",
 847			       node_width - num_digits(node), " ", node);
 848		}
 849
 850		/* Add padding for the BSP */
 851		if (cpu == 1)
 852			pr_cont("%*s", width + 1, " ");
 853
 854		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);
 855
 856	} else
 857		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
 858			node, cpu, apicid);
 859}
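/*
 * Illustration (editor's note): on a single-node machine with four CPUs
 * the format strings above produce boot output roughly like
 *
 *   x86: Booting SMP configuration:
 *   .... node  #0, CPUs:        #1  #2  #3
 *
 * where the leading gap is the padding reserved for the BSP (#0), which
 * is never announced through this path.
 */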
 860
 861static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
 862{
 863	int cpu;
 864
 865	cpu = smp_processor_id();
 866	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
 867		return NMI_HANDLED;
 868
 869	return NMI_DONE;
 870}
 871
 872/*
 873 * Wake up AP by INIT, INIT, STARTUP sequence.
 874 *
 875 * Instead of waiting for STARTUP after INITs, BSP will execute the BIOS
  876 * boot-strap code, which is not the desired behavior when waking up the BSP. To
  877 * avoid the boot-strap code, wake up CPU0 by NMI instead.
 878 *
 879 * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined
 880 * (i.e. physically hot removed and then hot added), NMI won't wake it up.
 881 * We'll change this code in the future to wake up hard offlined CPU0 if
 882 * real platform and request are available.
 883 */
 884static int
 885wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
 886	       int *cpu0_nmi_registered)
 887{
 888	int id;
 889	int boot_error;
 890
 891	preempt_disable();
 892
 893	/*
 894	 * Wake up AP by INIT, INIT, STARTUP sequence.
 895	 */
 896	if (cpu) {
 897		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
 898		goto out;
 899	}
 900
 901	/*
 902	 * Wake up BSP by nmi.
 903	 *
 904	 * Register a NMI handler to help wake up CPU0.
 905	 */
 906	boot_error = register_nmi_handler(NMI_LOCAL,
 907					  wakeup_cpu0_nmi, 0, "wake_cpu0");
 908
 909	if (!boot_error) {
 910		enable_start_cpu0 = 1;
 911		*cpu0_nmi_registered = 1;
 912		if (apic->dest_logical == APIC_DEST_LOGICAL)
 913			id = cpu0_logical_apicid;
 914		else
 915			id = apicid;
 916		boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
 917	}
 918
 919out:
 920	preempt_enable();
 921
 922	return boot_error;
 923}
 924
 925void common_cpu_up(unsigned int cpu, struct task_struct *idle)
 926{
 927	/* Just in case we booted with a single CPU. */
 928	alternatives_enable_smp();
 929
 930	per_cpu(current_task, cpu) = idle;
 931
 932#ifdef CONFIG_X86_32
 933	/* Stack for startup_32 can be just as for start_secondary onwards */
 934	irq_ctx_init(cpu);
 935	per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
 936#else
 937	initial_gs = per_cpu_offset(cpu);
 938#endif
 939}
 940
 941/*
 942 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 943 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 944 * Returns zero if CPU booted OK, else error code from
 945 * ->wakeup_secondary_cpu.
 946 */
 947static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
 948		       int *cpu0_nmi_registered)
 949{
 950	volatile u32 *trampoline_status =
 951		(volatile u32 *) __va(real_mode_header->trampoline_status);
 952	/* start_ip had better be page-aligned! */
 953	unsigned long start_ip = real_mode_header->trampoline_start;
 954
 955	unsigned long boot_error = 0;
 956	unsigned long timeout;
 957
 958	idle->thread.sp = (unsigned long)task_pt_regs(idle);
 959	early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
 960	initial_code = (unsigned long)start_secondary;
 961	initial_stack  = idle->thread.sp;
 962
 963	/* Enable the espfix hack for this CPU */
 964	init_espfix_ap(cpu);
 965
 966	/* So we see what's up */
 967	announce_cpu(cpu, apicid);
 968
 969	/*
 970	 * This grunge runs the startup process for
 971	 * the targeted processor.
 972	 */
 973
 974	if (x86_platform.legacy.warm_reset) {
 975
 976		pr_debug("Setting warm reset code and vector.\n");
 977
 978		smpboot_setup_warm_reset_vector(start_ip);
 979		/*
 980		 * Be paranoid about clearing APIC errors.
 981		*/
 982		if (APIC_INTEGRATED(boot_cpu_apic_version)) {
 983			apic_write(APIC_ESR, 0);
 984			apic_read(APIC_ESR);
 985		}
 986	}
 987
 988	/*
 989	 * AP might wait on cpu_callout_mask in cpu_init() with
 990	 * cpu_initialized_mask set if previous attempt to online
  991	 * it timed out. Clear cpu_initialized_mask so that after
 992	 * INIT/SIPI it could start with a clean state.
 993	 */
 994	cpumask_clear_cpu(cpu, cpu_initialized_mask);
 995	smp_mb();
 996
 997	/*
  998	 * Wake up a CPU in different cases:
 999	 * - Use the method in the APIC driver if it's defined
1000	 * Otherwise,
1001	 * - Use an INIT boot APIC message for APs or NMI for BSP.
1002	 */
1003	if (apic->wakeup_secondary_cpu)
1004		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
1005	else
1006		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
1007						     cpu0_nmi_registered);
1008
1009	if (!boot_error) {
1010		/*
1011		 * Wait 10s total for first sign of life from AP
1012		 */
1013		boot_error = -1;
1014		timeout = jiffies + 10*HZ;
1015		while (time_before(jiffies, timeout)) {
1016			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
1017				/*
1018				 * Tell AP to proceed with initialization
1019				 */
1020				cpumask_set_cpu(cpu, cpu_callout_mask);
1021				boot_error = 0;
1022				break;
1023			}
1024			schedule();
1025		}
1026	}
1027
1028	if (!boot_error) {
1029		/*
1030		 * Wait till AP completes initial initialization
1031		 */
1032		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
1033			/*
1034			 * Allow other tasks to run while we wait for the
1035			 * AP to come online. This also gives a chance
 1036			 * for the MTRR work (triggered by the AP coming online)
1037			 * to be completed in the stop machine context.
1038			 */
1039			schedule();
1040		}
1041	}
1042
1043	/* mark "stuck" area as not stuck */
1044	*trampoline_status = 0;
1045
1046	if (x86_platform.legacy.warm_reset) {
1047		/*
1048		 * Cleanup possible dangling ends...
1049		 */
1050		smpboot_restore_warm_reset_vector();
1051	}
1052
1053	return boot_error;
1054}
1055
1056int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
1057{
1058	int apicid = apic->cpu_present_to_apicid(cpu);
1059	int cpu0_nmi_registered = 0;
1060	unsigned long flags;
1061	int err, ret = 0;
1062
1063	lockdep_assert_irqs_enabled();
1064
1065	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
1066
1067	if (apicid == BAD_APICID ||
1068	    !physid_isset(apicid, phys_cpu_present_map) ||
1069	    !apic->apic_id_valid(apicid)) {
1070		pr_err("%s: bad cpu %d\n", __func__, cpu);
1071		return -EINVAL;
1072	}
1073
1074	/*
1075	 * Already booted CPU?
1076	 */
1077	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
1078		pr_debug("do_boot_cpu %d Already started\n", cpu);
1079		return -ENOSYS;
1080	}
1081
1082	/*
1083	 * Save current MTRR state in case it was changed since early boot
1084	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
1085	 */
1086	mtrr_save_state();
1087
1088	/* x86 CPUs take themselves offline, so delayed offline is OK. */
1089	err = cpu_check_up_prepare(cpu);
1090	if (err && err != -EBUSY)
1091		return err;
1092
1093	/* the FPU context is blank, nobody can own it */
1094	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
1095
1096	common_cpu_up(cpu, tidle);
1097
1098	err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
1099	if (err) {
1100		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
1101		ret = -EIO;
1102		goto unreg_nmi;
1103	}
1104
1105	/*
1106	 * Check TSC synchronization with the AP (keep irqs disabled
1107	 * while doing so):
1108	 */
1109	local_irq_save(flags);
1110	check_tsc_sync_source(cpu);
1111	local_irq_restore(flags);
1112
1113	while (!cpu_online(cpu)) {
1114		cpu_relax();
1115		touch_nmi_watchdog();
1116	}
1117
1118unreg_nmi:
1119	/*
1120	 * Clean up the nmi handler. Do this after the callin and callout sync
1121	 * to avoid impact of possible long unregister time.
1122	 */
1123	if (cpu0_nmi_registered)
1124		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");
1125
1126	return ret;
1127}
1128
1129/**
1130 * arch_disable_smp_support() - disables SMP support for x86 at runtime
1131 */
1132void arch_disable_smp_support(void)
1133{
1134	disable_ioapic_support();
1135}
1136
1137/*
1138 * Fall back to non SMP mode after errors.
1139 *
1140 * RED-PEN audit/test this more. I bet there is more state messed up here.
1141 */
1142static __init void disable_smp(void)
1143{
1144	pr_info("SMP disabled\n");
1145
1146	disable_ioapic_support();
1147
1148	init_cpu_present(cpumask_of(0));
1149	init_cpu_possible(cpumask_of(0));
1150
1151	if (smp_found_config)
1152		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
1153	else
1154		physid_set_mask_of_physid(0, &phys_cpu_present_map);
1155	cpumask_set_cpu(0, topology_sibling_cpumask(0));
1156	cpumask_set_cpu(0, topology_core_cpumask(0));
1157}
1158
1159/*
1160 * Various sanity checks.
1161 */
1162static void __init smp_sanity_check(void)
1163{
1164	preempt_disable();
1165
1166#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
1167	if (def_to_bigsmp && nr_cpu_ids > 8) {
1168		unsigned int cpu;
1169		unsigned nr;
1170
1171		pr_warn("More than 8 CPUs detected - skipping them\n"
1172			"Use CONFIG_X86_BIGSMP\n");
1173
1174		nr = 0;
1175		for_each_present_cpu(cpu) {
1176			if (nr >= 8)
1177				set_cpu_present(cpu, false);
1178			nr++;
1179		}
1180
1181		nr = 0;
1182		for_each_possible_cpu(cpu) {
1183			if (nr >= 8)
1184				set_cpu_possible(cpu, false);
1185			nr++;
1186		}
1187
1188		nr_cpu_ids = 8;
1189	}
1190#endif
1191
1192	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
1193		pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
1194			hard_smp_processor_id());
1195
1196		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1197	}
1198
1199	/*
1200	 * Should not be necessary because the MP table should list the boot
1201	 * CPU too, but we do it for the sake of robustness anyway.
1202	 */
1203	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
1204		pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
1205			  boot_cpu_physical_apicid);
1206		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1207	}
1208	preempt_enable();
1209}
1210
1211static void __init smp_cpu_index_default(void)
1212{
1213	int i;
1214	struct cpuinfo_x86 *c;
1215
1216	for_each_possible_cpu(i) {
1217		c = &cpu_data(i);
1218		/* mark all to hotplug */
1219		c->cpu_index = nr_cpu_ids;
1220	}
1221}
1222
1223static void __init smp_get_logical_apicid(void)
1224{
1225	if (x2apic_mode)
1226		cpu0_logical_apicid = apic_read(APIC_LDR);
1227	else
1228		cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
1229}
1230
1231/*
1232 * Prepare for SMP bootup.
 1233 * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
1234 *            for common interface support.
1235 */
1236void __init native_smp_prepare_cpus(unsigned int max_cpus)
1237{
1238	unsigned int i;
1239
1240	smp_cpu_index_default();
1241
1242	/*
1243	 * Setup boot CPU information
1244	 */
1245	smp_store_boot_cpu_info(); /* Final full version of the data */
1246	cpumask_copy(cpu_callin_mask, cpumask_of(0));
1247	mb();
1248
1249	for_each_possible_cpu(i) {
1250		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
1251		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
1252		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
1253	}
1254
1255	/*
1256	 * Set 'default' x86 topology, this matches default_topology() in that
1257	 * it has NUMA nodes as a topology level. See also
1258	 * native_smp_cpus_done().
1259	 *
 1260	 * Must be done before set_cpu_sibling_map() is run.
1261	 */
1262	set_sched_topology(x86_topology);
1263
1264	set_cpu_sibling_map(0);
1265
1266	smp_sanity_check();
1267
1268	switch (apic_intr_mode) {
1269	case APIC_PIC:
1270	case APIC_VIRTUAL_WIRE_NO_CONFIG:
1271		disable_smp();
1272		return;
1273	case APIC_SYMMETRIC_IO_NO_ROUTING:
1274		disable_smp();
1275		/* Setup local timer */
1276		x86_init.timers.setup_percpu_clockev();
1277		return;
1278	case APIC_VIRTUAL_WIRE:
1279	case APIC_SYMMETRIC_IO:
1280		break;
1281	}
1282
1283	/* Setup local timer */
1284	x86_init.timers.setup_percpu_clockev();
1285
1286	smp_get_logical_apicid();
1287
1288	pr_info("CPU0: ");
1289	print_cpu_info(&cpu_data(0));
1290
1291	native_pv_lock_init();
1292
1293	uv_system_init();
1294
1295	set_mtrr_aps_delayed_init();
1296
1297	smp_quirk_init_udelay();
1298
1299	speculative_store_bypass_ht_init();
1300}
1301
1302void arch_enable_nonboot_cpus_begin(void)
1303{
1304	set_mtrr_aps_delayed_init();
1305}
1306
1307void arch_enable_nonboot_cpus_end(void)
1308{
1309	mtrr_aps_init();
1310}
1311
1312/*
1313 * Early setup to make printk work.
1314 */
1315void __init native_smp_prepare_boot_cpu(void)
1316{
1317	int me = smp_processor_id();
1318	switch_to_new_gdt(me);
1319	/* already set me in cpu_online_mask in boot_cpu_init() */
1320	cpumask_set_cpu(me, cpu_callout_mask);
1321	cpu_set_state_online(me);
1322}
1323
1324void __init calculate_max_logical_packages(void)
1325{
1326	int ncpus;
1327
1328	/*
 1329	 * Today neither Intel nor AMD support heterogeneous systems, so
1330	 * extrapolate the boot cpu's data to all packages.
1331	 */
1332	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
1333	__max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
1334	pr_info("Max logical packages: %u\n", __max_logical_packages);
1335}
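/*
 * Worked example (editor's illustration): with 4 booted cores in the
 * boot CPU's package and SMT-2 (topology_max_smt_threads() == 2),
 * ncpus == 8; on a machine with nr_cpu_ids == 16 this gives
 * __max_logical_packages == DIV_ROUND_UP(16, 8) == 2.
 */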
1336
1337void __init native_smp_cpus_done(unsigned int max_cpus)
1338{
1339	pr_debug("Boot done\n");
1340
1341	calculate_max_logical_packages();
1342
1343	if (x86_has_numa_in_package)
1344		set_sched_topology(x86_numa_in_package_topology);
1345
1346	nmi_selftest();
1347	impress_friends();
1348	mtrr_aps_init();
1349}
1350
1351static int __initdata setup_possible_cpus = -1;
1352static int __init _setup_possible_cpus(char *str)
1353{
1354	get_option(&str, &setup_possible_cpus);
1355	return 0;
1356}
1357early_param("possible_cpus", _setup_possible_cpus);
1358
1359
1360/*
 1361 * cpu_possible_mask should be static: it cannot change as CPUs
 1362 * are onlined or offlined. The reason is that per-cpu data structures
 1363 * are allocated by some modules at init time, and they don't expect to
 1364 * do this dynamically on cpu arrival/departure.
1365 * cpu_present_mask on the other hand can change dynamically.
1366 * In case when cpu_hotplug is not compiled, then we resort to current
1367 * behaviour, which is cpu_possible == cpu_present.
1368 * - Ashok Raj
1369 *
1370 * Three ways to find out the number of additional hotplug CPUs:
1371 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
1372 * - The user can overwrite it with possible_cpus=NUM
1373 * - Otherwise don't reserve additional CPUs.
1374 * We do this because additional CPUs waste a lot of memory.
1375 * -AK
1376 */
1377__init void prefill_possible_map(void)
1378{
1379	int i, possible;
1380
1381	/* No boot processor was found in mptable or ACPI MADT */
1382	if (!num_processors) {
1383		if (boot_cpu_has(X86_FEATURE_APIC)) {
1384			int apicid = boot_cpu_physical_apicid;
1385			int cpu = hard_smp_processor_id();
1386
1387			pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
1388
1389			/* Make sure boot cpu is enumerated */
1390			if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
1391			    apic->apic_id_valid(apicid))
1392				generic_processor_info(apicid, boot_cpu_apic_version);
1393		}
1394
1395		if (!num_processors)
1396			num_processors = 1;
1397	}
1398
1399	i = setup_max_cpus ?: 1;
1400	if (setup_possible_cpus == -1) {
1401		possible = num_processors;
1402#ifdef CONFIG_HOTPLUG_CPU
1403		if (setup_max_cpus)
1404			possible += disabled_cpus;
1405#else
1406		if (possible > i)
1407			possible = i;
1408#endif
1409	} else
1410		possible = setup_possible_cpus;
1411
1412	total_cpus = max_t(int, possible, num_processors + disabled_cpus);
1413
1414	/* nr_cpu_ids could be reduced via nr_cpus= */
1415	if (possible > nr_cpu_ids) {
1416		pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
1417			possible, nr_cpu_ids);
1418		possible = nr_cpu_ids;
1419	}
1420
1421#ifdef CONFIG_HOTPLUG_CPU
1422	if (!setup_max_cpus)
1423#endif
1424	if (possible > i) {
1425		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
1426			possible, setup_max_cpus);
1427		possible = i;
1428	}
1429
1430	nr_cpu_ids = possible;
1431
1432	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
1433		possible, max_t(int, possible - num_processors, 0));
1434
1435	reset_cpu_possible_mask();
1436
1437	for (i = 0; i < possible; i++)
1438		set_cpu_possible(i, true);
1439}
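/*
 * Worked example (editor's illustration): with CONFIG_HOTPLUG_CPU,
 * num_processors == 4 present CPUs, disabled_cpus == 4 and no
 * "possible_cpus=" override, possible == 4 + 4 == 8 (subject to the
 * nr_cpu_ids cap), so the boot log reads
 * "Allowing 8 CPUs, 4 hotplug CPUs".
 */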
1440
1441#ifdef CONFIG_HOTPLUG_CPU
1442
1443/* Recompute SMT state for all CPUs on offline */
1444static void recompute_smt_state(void)
1445{
1446	int max_threads, cpu;
1447
1448	max_threads = 0;
1449	for_each_online_cpu (cpu) {
1450		int threads = cpumask_weight(topology_sibling_cpumask(cpu));
1451
1452		if (threads > max_threads)
1453			max_threads = threads;
1454	}
1455	__max_smt_threads = max_threads;
1456}
1457
1458static void remove_siblinginfo(int cpu)
1459{
1460	int sibling;
1461	struct cpuinfo_x86 *c = &cpu_data(cpu);
1462
1463	for_each_cpu(sibling, topology_core_cpumask(cpu)) {
1464		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
 1465		/*
1466		 * last thread sibling in this cpu core going down
1467		 */
1468		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
1469			cpu_data(sibling).booted_cores--;
1470	}
1471
1472	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
1473		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
1474	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
1475		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
1476	cpumask_clear(cpu_llc_shared_mask(cpu));
1477	cpumask_clear(topology_sibling_cpumask(cpu));
1478	cpumask_clear(topology_core_cpumask(cpu));
1479	c->cpu_core_id = 0;
1480	c->booted_cores = 0;
1481	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
1482	recompute_smt_state();
1483}
1484
1485static void remove_cpu_from_maps(int cpu)
1486{
1487	set_cpu_online(cpu, false);
1488	cpumask_clear_cpu(cpu, cpu_callout_mask);
1489	cpumask_clear_cpu(cpu, cpu_callin_mask);
1490	/* was set by cpu_init() */
1491	cpumask_clear_cpu(cpu, cpu_initialized_mask);
1492	numa_remove_cpu(cpu);
1493}
1494
1495void cpu_disable_common(void)
1496{
1497	int cpu = smp_processor_id();
1498
1499	remove_siblinginfo(cpu);
1500
1501	/* It's now safe to remove this processor from the online map */
1502	lock_vector_lock();
1503	remove_cpu_from_maps(cpu);
1504	unlock_vector_lock();
1505	fixup_irqs();
1506	lapic_offline();
1507}
1508
1509int native_cpu_disable(void)
1510{
1511	int ret;
1512
1513	ret = lapic_can_unplug_cpu();
1514	if (ret)
1515		return ret;
1516
1517	clear_local_APIC();
1518	cpu_disable_common();
1519
1520	return 0;
1521}
1522
1523int common_cpu_die(unsigned int cpu)
1524{
1525	int ret = 0;
1526
1527	/* We don't do anything here: idle task is faking death itself. */
1528
1529	/* They ack this in play_dead() by setting CPU_DEAD */
1530	if (cpu_wait_death(cpu, 5)) {
1531		if (system_state == SYSTEM_RUNNING)
1532			pr_info("CPU %u is now offline\n", cpu);
1533	} else {
1534		pr_err("CPU %u didn't die...\n", cpu);
1535		ret = -1;
1536	}
1537
1538	return ret;
1539}
1540
1541void native_cpu_die(unsigned int cpu)
1542{
1543	common_cpu_die(cpu);
1544}
1545
1546void play_dead_common(void)
1547{
1548	idle_task_exit();
1549
1550	/* Ack it */
1551	(void)cpu_report_death();
1552
1553	/*
1554	 * With physical CPU hotplug, we should halt the cpu
1555	 */
1556	local_irq_disable();
1557}
1558
1559static bool wakeup_cpu0(void)
1560{
1561	if (smp_processor_id() == 0 && enable_start_cpu0)
1562		return true;
1563
1564	return false;
1565}
1566
1567/*
1568 * We need to flush the caches before going to sleep, lest we have
1569 * dirty data in our caches when we come back up.
1570 */
1571static inline void mwait_play_dead(void)
1572{
1573	unsigned int eax, ebx, ecx, edx;
1574	unsigned int highest_cstate = 0;
1575	unsigned int highest_subcstate = 0;
1576	void *mwait_ptr;
1577	int i;
1578
1579	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1580		return;
1581	if (!this_cpu_has(X86_FEATURE_MWAIT))
1582		return;
1583	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
1584		return;
1585	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
1586		return;
1587
1588	eax = CPUID_MWAIT_LEAF;
1589	ecx = 0;
1590	native_cpuid(&eax, &ebx, &ecx, &edx);
1591
1592	/*
1593	 * eax will be 0 if EDX enumeration is not valid.
1594	 * Initialized below to cstate, sub_cstate value when EDX is valid.
1595	 */
1596	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
1597		eax = 0;
1598	} else {
1599		edx >>= MWAIT_SUBSTATE_SIZE;
1600		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
1601			if (edx & MWAIT_SUBSTATE_MASK) {
1602				highest_cstate = i;
1603				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
1604			}
1605		}
1606		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
1607			(highest_subcstate - 1);
1608	}
1609
1610	/*
1611	 * This should be a memory location in a cache line which is
1612	 * unlikely to be touched by other processors.  The actual
1613	 * content is immaterial as it is not actually modified in any way.
1614	 */
1615	mwait_ptr = &current_thread_info()->flags;
1616
1617	wbinvd();
1618
1619	while (1) {
1620		/*
1621		 * The CLFLUSH is a workaround for erratum AAI65 for
1622		 * the Xeon 7400 series.  It's not clear it is actually
1623		 * needed, but it should be harmless in either case.
1624		 * The WBINVD is insufficient due to the spurious-wakeup
1625		 * case where we return around the loop.
1626		 */
1627		mb();
1628		clflush(mwait_ptr);
1629		mb();
1630		__monitor(mwait_ptr, 0, 0);
1631		mb();
1632		__mwait(eax, 0);
1633		/*
1634		 * If NMI wants to wake up CPU0, start CPU0.
1635		 */
1636		if (wakeup_cpu0())
1637			start_cpu0();
1638	}
1639}
1640
1641void hlt_play_dead(void)
1642{
1643	if (__this_cpu_read(cpu_info.x86) >= 4)
1644		wbinvd();
1645
1646	while (1) {
1647		native_halt();
1648		/*
1649		 * If NMI wants to wake up CPU0, start CPU0.
1650		 */
1651		if (wakeup_cpu0())
1652			start_cpu0();
1653	}
1654}
1655
1656void native_play_dead(void)
1657{
1658	play_dead_common();
1659	tboot_shutdown(TB_SHUTDOWN_WFS);
1660
1661	mwait_play_dead();	/* Only returns on failure */
1662	if (cpuidle_play_dead())
1663		hlt_play_dead();
1664}
1665
1666#else /* ... !CONFIG_HOTPLUG_CPU */
1667int native_cpu_disable(void)
1668{
1669	return -ENOSYS;
1670}
1671
1672void native_cpu_die(unsigned int cpu)
1673{
1674	/* We said "no" in __cpu_disable */
1675	BUG();
1676}
1677
1678void native_play_dead(void)
1679{
1680	BUG();
1681}
1682
1683#endif
v4.6
   1 /*
   2 *	x86 SMP booting functions
   3 *
   4 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
   5 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
   6 *	Copyright 2001 Andi Kleen, SuSE Labs.
   7 *
   8 *	Much of the core SMP work is based on previous work by Thomas Radke, to
   9 *	whom a great many thanks are extended.
  10 *
  11 *	Thanks to Intel for making available several different Pentium,
  12 *	Pentium Pro and Pentium-II/Xeon MP machines.
  13 *	Original development of Linux SMP code supported by Caldera.
  14 *
  15 *	This code is released under the GNU General Public License version 2 or
  16 *	later.
  17 *
  18 *	Fixes
  19 *		Felix Koop	:	NR_CPUS used properly
  20 *		Jose Renau	:	Handle single CPU case.
  21 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
  22 *		Greg Wright	:	Fix for kernel stacks panic.
  23 *		Erich Boleyn	:	MP v1.4 and additional changes.
  24 *	Matthias Sattler	:	Changes for 2.1 kernel map.
  25 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
  26 *	Michael Chastain	:	Change trampoline.S to gnu as.
  27 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
  28 *		Ingo Molnar	:	Added APIC timers, based on code
  29 *					from Jose Renau
  30 *		Ingo Molnar	:	various cleanups and rewrites
  31 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
  32 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
  33 *	Andi Kleen		:	Changed for SMP boot into long mode.
  34 *		Martin J. Bligh	: 	Added support for multi-quad systems
  35 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
  36 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
  37 *      Andi Kleen              :       Converted to new state machine.
  38 *	Ashok Raj		: 	CPU hotplug support
  39 *	Glauber Costa		:	i386 and x86_64 integration
  40 */
  41
  42#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  43
  44#include <linux/init.h>
  45#include <linux/smp.h>
  46#include <linux/module.h>
  47#include <linux/sched.h>
 
 
 
  48#include <linux/percpu.h>
  49#include <linux/bootmem.h>
  50#include <linux/err.h>
  51#include <linux/nmi.h>
  52#include <linux/tboot.h>
  53#include <linux/stackprotector.h>
  54#include <linux/gfp.h>
  55#include <linux/cpuidle.h>
  56
  57#include <asm/acpi.h>
  58#include <asm/desc.h>
  59#include <asm/nmi.h>
  60#include <asm/irq.h>
  61#include <asm/idle.h>
  62#include <asm/realmode.h>
  63#include <asm/cpu.h>
  64#include <asm/numa.h>
  65#include <asm/pgtable.h>
  66#include <asm/tlbflush.h>
  67#include <asm/mtrr.h>
  68#include <asm/mwait.h>
  69#include <asm/apic.h>
  70#include <asm/io_apic.h>
  71#include <asm/fpu/internal.h>
  72#include <asm/setup.h>
  73#include <asm/uv/uv.h>
  74#include <linux/mc146818rtc.h>
  75#include <asm/i8259.h>
  76#include <asm/realmode.h>
  77#include <asm/misc.h>
 
 
 
 
  78
  79/* Number of siblings per CPU package */
  80int smp_num_siblings = 1;
  81EXPORT_SYMBOL(smp_num_siblings);
  82
  83/* Last level cache ID of each logical CPU */
  84DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
  85
  86/* representing HT siblings of each logical CPU */
  87DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
  88EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  89
  90/* representing HT and core siblings of each logical CPU */
  91DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
  92EXPORT_PER_CPU_SYMBOL(cpu_core_map);
  93
  94DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
  95
  96/* Per CPU bogomips and other parameters */
  97DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
  98EXPORT_PER_CPU_SYMBOL(cpu_info);
  99
 100/* Logical package management. We might want to allocate that dynamically */
 101static int *physical_to_logical_pkg __read_mostly;
 102static unsigned long *physical_package_map __read_mostly;;
 103static unsigned long *logical_package_map  __read_mostly;
 104static unsigned int max_physical_pkg_id __read_mostly;
 105unsigned int __max_logical_packages __read_mostly;
 106EXPORT_SYMBOL(__max_logical_packages);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 107
 108static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 109{
 110	unsigned long flags;
 111
 112	spin_lock_irqsave(&rtc_lock, flags);
 113	CMOS_WRITE(0xa, 0xf);
 114	spin_unlock_irqrestore(&rtc_lock, flags);
 115	local_flush_tlb();
 116	pr_debug("1.\n");
 117	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
 118							start_eip >> 4;
 119	pr_debug("2.\n");
 120	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
 121							start_eip & 0xf;
 122	pr_debug("3.\n");
 123}
 124
 125static inline void smpboot_restore_warm_reset_vector(void)
 126{
 127	unsigned long flags;
 128
 129	/*
 130	 * Install writable page 0 entry to set BIOS data area.
 131	 */
 132	local_flush_tlb();
 133
 134	/*
 135	 * Paranoid:  Set warm reset code and vector here back
 136	 * to default values.
 137	 */
 138	spin_lock_irqsave(&rtc_lock, flags);
 139	CMOS_WRITE(0, 0xf);
 140	spin_unlock_irqrestore(&rtc_lock, flags);
 141
 142	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
 143}
 144
 145/*
 146 * Report back to the Boot Processor during boot time or to the caller processor
 147 * during CPU online.
 148 */
 149static void smp_callin(void)
 150{
 151	int cpuid, phys_id;
 152
 153	/*
 154	 * If waken up by an INIT in an 82489DX configuration
 155	 * cpu_callout_mask guarantees we don't get here before
 156	 * an INIT_deassert IPI reaches our local APIC, so it is
 157	 * now safe to touch our local APIC.
 158	 */
 159	cpuid = smp_processor_id();
 160
 161	/*
 162	 * (This works even if the APIC is not enabled.)
 163	 */
 164	phys_id = read_apic_id();
 165
 166	/*
 167	 * the boot CPU has finished the init stage and is spinning
 168	 * on callin_map until we finish. We are free to set up this
 169	 * CPU, first the APIC. (this is probably redundant on most
 170	 * boards)
 171	 */
 172	apic_ap_setup();
 173
 174	/*
 175	 * Save our processor parameters. Note: this information
 176	 * is needed for clock calibration.
 177	 */
 178	smp_store_cpu_info(cpuid);
 179
 180	/*
 
 
 
 
 
 
 181	 * Get our bogomips.
 182	 * Update loops_per_jiffy in cpu_data. Previous call to
 183	 * smp_store_cpu_info() stored a value that is close but not as
 184	 * accurate as the value just calculated.
 185	 */
 186	calibrate_delay();
 187	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
 188	pr_debug("Stack at about %p\n", &cpuid);
 189
 190	/*
 191	 * This must be done before setting cpu_online_mask
 192	 * or calling notify_cpu_starting.
 193	 */
 194	set_cpu_sibling_map(raw_smp_processor_id());
 195	wmb();
 196
 197	notify_cpu_starting(cpuid);
 198
 199	/*
 200	 * Allow the master to continue.
 201	 */
 202	cpumask_set_cpu(cpuid, cpu_callin_mask);
 203}
 204
 205static int cpu0_logical_apicid;
 206static int enable_start_cpu0;
 207/*
 208 * Activate a secondary processor.
 209 */
 210static void notrace start_secondary(void *unused)
 211{
 212	/*
 213	 * Don't put *anything* before cpu_init(), SMP booting is too
 214	 * fragile that we want to limit the things done here to the
 215	 * most necessary things.
 216	 */
 
 
 
 
 
 
 
 
 
 217	cpu_init();
 218	x86_cpuinit.early_percpu_clock_init();
 219	preempt_disable();
 220	smp_callin();
 221
 222	enable_start_cpu0 = 0;
 223
 224#ifdef CONFIG_X86_32
 225	/* switch away from the initial page table */
 226	load_cr3(swapper_pg_dir);
 227	__flush_tlb_all();
 228#endif
 229
 230	/* otherwise gcc will move up smp_processor_id before the cpu_init */
 231	barrier();
 232	/*
 233	 * Check TSC synchronization with the BP:
 234	 */
 235	check_tsc_sync_target();
 236
 
 
 237	/*
 238	 * Lock vector_lock and initialize the vectors on this cpu
 239	 * before setting the cpu online. We must set it online with
 240	 * vector_lock held to prevent a concurrent setup/teardown
 241	 * from seeing a half valid vector space.
 242	 */
 243	lock_vector_lock();
 244	setup_vector_irq(smp_processor_id());
 245	set_cpu_online(smp_processor_id(), true);
 
 246	unlock_vector_lock();
 247	cpu_set_state_online(smp_processor_id());
 248	x86_platform.nmi_init();
 249
 250	/* enable local interrupts */
 251	local_irq_enable();
 252
 253	/* to prevent fake stack check failure in clock setup */
 254	boot_init_stack_canary();
 255
 256	x86_cpuinit.setup_percpu_clockev();
 257
 258	wmb();
 259	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 260}
 261
 262int topology_update_package_map(unsigned int apicid, unsigned int cpu)
 263{
 264	unsigned int new, pkg = apicid >> boot_cpu_data.x86_coreid_bits;
 265
  266	/* Called from early boot? */
 267	if (!physical_package_map)
 268		return 0;
 269
 270	if (pkg >= max_physical_pkg_id)
 271		return -EINVAL;
 272
 273	/* Set the logical package id */
 274	if (test_and_set_bit(pkg, physical_package_map))
 275		goto found;
 276
 277	new = find_first_zero_bit(logical_package_map, __max_logical_packages);
 278	if (new >= __max_logical_packages) {
 279		physical_to_logical_pkg[pkg] = -1;
 280		pr_warn("APIC(%x) Package %u exceeds logical package map\n",
 281			apicid, pkg);
 282		return -ENOSPC;
 283	}
 284	set_bit(new, logical_package_map);
 285	pr_info("APIC(%x) Converting physical %u to logical package %u\n",
 286		apicid, pkg, new);
 287	physical_to_logical_pkg[pkg] = new;
 288
 289found:
 290	cpu_data(cpu).logical_proc_id = physical_to_logical_pkg[pkg];
 291	return 0;
 292}
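
/*
 * A worked example of the mapping above, for a hypothetical part with
 * x86_coreid_bits == 4 (16 APIC ids reserved per package): APIC id 0x23
 * gives physical package 0x23 >> 4 == 2. The first time package 2 is
 * seen it is assigned the lowest free logical id, so sparse physical
 * package ids still yield densely numbered logical packages.
 */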
 293
 294/**
  295 * topology_phys_to_logical_pkg - Map a physical package id to a logical one
  296 * @phys_pkg: The physical package id to map
  297 * Returns the logical package id, or -1 if not found.
 298 */
 299int topology_phys_to_logical_pkg(unsigned int phys_pkg)
 300{
 301	if (phys_pkg >= max_physical_pkg_id)
 302		return -1;
 303	return physical_to_logical_pkg[phys_pkg];
 304}
 305EXPORT_SYMBOL(topology_phys_to_logical_pkg);
 306
 307static void __init smp_init_package_map(void)
 308{
 309	unsigned int ncpus, cpu;
 310	size_t size;
 311
 312	/*
  313	 * Today neither Intel nor AMD support heterogeneous systems. That
  314	 * might change in the future...
 315	 *
 316	 * While ideally we'd want '* smp_num_siblings' in the below @ncpus
 317	 * computation, this won't actually work since some Intel BIOSes
 318	 * report inconsistent HT data when they disable HT.
 319	 *
 320	 * In particular, they reduce the APIC-IDs to only include the cores,
 321	 * but leave the CPUID topology to say there are (2) siblings.
 322	 * This means we don't know how many threads there will be until
 323	 * after the APIC enumeration.
 324	 *
  325	 * By not including this we'll sometimes overestimate the number of
  326	 * logical packages by the number of !present siblings, but this is
 327	 * still better than MAX_LOCAL_APIC.
 328	 *
 329	 * We use total_cpus not nr_cpu_ids because nr_cpu_ids can be limited
 330	 * on the command line leading to a similar issue as the HT disable
 331	 * problem because the hyperthreads are usually enumerated after the
 332	 * primary cores.
 333	 */
 334	ncpus = boot_cpu_data.x86_max_cores;
 335	if (!ncpus) {
  336		pr_warn("x86_max_cores == zero !?!?\n");
 337		ncpus = 1;
 338	}
 339
 340	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
 341
 342	/*
  343	 * Possibly larger than what we need, as the number of APIC ids
  344	 * actually used per package can be smaller than the number reserved.
 345	 */
 346	max_physical_pkg_id = DIV_ROUND_UP(MAX_LOCAL_APIC, ncpus);
 347	size = max_physical_pkg_id * sizeof(unsigned int);
 348	physical_to_logical_pkg = kmalloc(size, GFP_KERNEL);
 349	memset(physical_to_logical_pkg, 0xff, size);
 350	size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
 351	physical_package_map = kzalloc(size, GFP_KERNEL);
 352	size = BITS_TO_LONGS(__max_logical_packages) * sizeof(unsigned long);
 353	logical_package_map = kzalloc(size, GFP_KERNEL);
 354
 355	pr_info("Max logical packages: %u\n", __max_logical_packages);
 356
 357	for_each_present_cpu(cpu) {
 358		unsigned int apicid = apic->cpu_present_to_apicid(cpu);
 359
 360		if (apicid == BAD_APICID || !apic->apic_id_valid(apicid))
 361			continue;
 362		if (!topology_update_package_map(apicid, cpu))
 363			continue;
 364		pr_warn("CPU %u APICId %x disabled\n", cpu, apicid);
 365		per_cpu(x86_bios_cpu_apicid, cpu) = BAD_APICID;
 366		set_cpu_possible(cpu, false);
 367		set_cpu_present(cpu, false);
 368	}
 369}
 370
 371void __init smp_store_boot_cpu_info(void)
 372{
 373	int id = 0; /* CPU 0 */
 374	struct cpuinfo_x86 *c = &cpu_data(id);
 375
 376	*c = boot_cpu_data;
 377	c->cpu_index = id;
 378	smp_init_package_map();
 379}
 380
 381/*
 382 * The bootstrap kernel entry code has set these up. Save them for
  383 * a given CPU.
 384 */
 385void smp_store_cpu_info(int id)
 386{
 387	struct cpuinfo_x86 *c = &cpu_data(id);
 388
 389	*c = boot_cpu_data;
 390	c->cpu_index = id;
 391	/*
  392	 * During boot, CPU0 already has this set up. Save the info when
  393	 * bringing up an AP or a previously offlined CPU0.
 394	 */
 395	identify_secondary_cpu(c);
 396}
 397
 398static bool
 399topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 400{
 401	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 402
 403	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
 404}
 405
 406static bool
 407topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
 408{
 409	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 410
 411	return !WARN_ONCE(!topology_same_node(c, o),
 412		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
 413		"[node: %d != %d]. Ignoring dependency.\n",
 414		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
 415}
 416
 417#define link_mask(mfunc, c1, c2)					\
 418do {									\
 419	cpumask_set_cpu((c1), mfunc(c2));				\
 420	cpumask_set_cpu((c2), mfunc(c1));				\
 421} while (0)
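
/*
 * A usage sketch, with hypothetical CPUs 2 and 6 that are SMT siblings:
 *
 *	link_mask(topology_sibling_cpumask, 2, 6);
 *
 * sets CPU 6 in CPU 2's sibling mask and CPU 2 in CPU 6's, keeping the
 * sibling relation symmetric by construction.
 */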
 422
 423static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 424{
 425	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 426		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 427
 428		if (c->phys_proc_id == o->phys_proc_id &&
 429		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
 430		    c->cpu_core_id == o->cpu_core_id)
 431			return topology_sane(c, o, "smt");
 432
 433	} else if (c->phys_proc_id == o->phys_proc_id &&
 434		   c->cpu_core_id == o->cpu_core_id) {
 435		return topology_sane(c, o, "smt");
 436	}
 437
 438	return false;
 439}
 440
 441static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 442{
 443	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 444
 445	if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
 446	    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
 447		return topology_sane(c, o, "llc");
 448
 449	return false;
 450}
 451
 452/*
 453 * Unlike the other levels, we do not enforce keeping a
 454 * multicore group inside a NUMA node.  If this happens, we will
 455 * discard the MC level of the topology later.
 456 */
 457static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 458{
 459	if (c->phys_proc_id == o->phys_proc_id)
 460		return true;
 461	return false;
 462}
 463
 464static struct sched_domain_topology_level numa_inside_package_topology[] = {
 465#ifdef CONFIG_SCHED_SMT
 466	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 467#endif
 468#ifdef CONFIG_SCHED_MC
 469	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 470#endif
 471	{ NULL, },
 472};
 473/*
 474 * set_sched_topology() sets the topology internal to a CPU.  The
 475 * NUMA topologies are layered on top of it to build the full
 476 * system topology.
 477 *
 478 * If NUMA nodes are observed to occur within a CPU package, this
 479 * function should be called.  It forces the sched domain code to
 480 * only use the SMT level for the CPU portion of the topology.
 481 * This essentially falls back to relying on NUMA information
 482 * from the SRAT table to describe the entire system topology
 483 * (except for hyperthreads).
 484 */
 485static void primarily_use_numa_for_topology(void)
 486{
 487	set_sched_topology(numa_inside_package_topology);
 488}
 489
 490void set_cpu_sibling_map(int cpu)
 491{
 492	bool has_smt = smp_num_siblings > 1;
 493	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
 494	struct cpuinfo_x86 *c = &cpu_data(cpu);
 495	struct cpuinfo_x86 *o;
 496	int i;
 497
 498	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 499
 500	if (!has_mp) {
 501		cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
 502		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 503		cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
 504		c->booted_cores = 1;
 505		return;
 506	}
 507
 508	for_each_cpu(i, cpu_sibling_setup_mask) {
 509		o = &cpu_data(i);
 510
 511		if ((i == cpu) || (has_smt && match_smt(c, o)))
 512			link_mask(topology_sibling_cpumask, cpu, i);
 513
 514		if ((i == cpu) || (has_mp && match_llc(c, o)))
 515			link_mask(cpu_llc_shared_mask, cpu, i);
 516
 517	}
 518
 519	/*
 520	 * This needs a separate iteration over the cpus because we rely on all
  521	 * topology_sibling_cpumask links to be set up.
 522	 */
 523	for_each_cpu(i, cpu_sibling_setup_mask) {
 524		o = &cpu_data(i);
 525
 526		if ((i == cpu) || (has_mp && match_die(c, o))) {
 527			link_mask(topology_core_cpumask, cpu, i);
 528
 529			/*
  530			 * Does this new CPU bring up a new core?
 531			 */
 532			if (cpumask_weight(
 533			    topology_sibling_cpumask(cpu)) == 1) {
 534				/*
 535				 * for each core in package, increment
 536				 * the booted_cores for this new cpu
 537				 */
 538				if (cpumask_first(
 539				    topology_sibling_cpumask(i)) == i)
 540					c->booted_cores++;
 541				/*
 542				 * increment the core count for all
 543				 * the other cpus in this package
 544				 */
 545				if (i != cpu)
 546					cpu_data(i).booted_cores++;
 547			} else if (i != cpu && !c->booted_cores)
 548				c->booted_cores = cpu_data(i).booted_cores;
 549		}
 550		if (match_die(c, o) && !topology_same_node(c, o))
 551			primarily_use_numa_for_topology();
 552	}
 553}
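
/*
 * A consumer sketch, assuming only the masks filled in above: once
 * set_cpu_sibling_map() has run, the topology masks can be walked like
 * any other cpumask, e.g.
 *
 *	int sibling;
 *
 *	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
 *		pr_info("CPU%d is an SMT sibling of CPU%d\n", sibling, cpu);
 */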
 554
 555/* maps the cpu to the sched domain representing multi-core */
 556const struct cpumask *cpu_coregroup_mask(int cpu)
 557{
 558	return cpu_llc_shared_mask(cpu);
 559}
 560
 561static void impress_friends(void)
 562{
 563	int cpu;
 564	unsigned long bogosum = 0;
 565	/*
 566	 * Allow the user to impress friends.
 567	 */
 568	pr_debug("Before bogomips\n");
 569	for_each_possible_cpu(cpu)
 570		if (cpumask_test_cpu(cpu, cpu_callout_mask))
 571			bogosum += cpu_data(cpu).loops_per_jiffy;
 572	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
 573		num_online_cpus(),
 574		bogosum/(500000/HZ),
 575		(bogosum/(5000/HZ))%100);
 576
 577	pr_debug("Before bogocount - setting activated=1\n");
 578}
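
/*
 * The fixed-point arithmetic above: one BogoMIPS is 500000 loops per
 * second and loops_per_jiffy counts loops per 1/HZ second, so
 * bogosum / (500000/HZ) is the integer part and
 * (bogosum / (5000/HZ)) % 100 the two decimals. E.g. with HZ == 250 and
 * a hypothetical bogosum of 9000000: 9000000 / 2000 == 4500, printed as
 * "4500.00 BogoMIPS".
 */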
 579
 580void __inquire_remote_apic(int apicid)
 581{
 582	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
 583	const char * const names[] = { "ID", "VERSION", "SPIV" };
 584	int timeout;
 585	u32 status;
 586
 587	pr_info("Inquiring remote APIC 0x%x...\n", apicid);
 588
 589	for (i = 0; i < ARRAY_SIZE(regs); i++) {
 590		pr_info("... APIC 0x%x %s: ", apicid, names[i]);
 591
 592		/*
 593		 * Wait for idle.
 594		 */
 595		status = safe_apic_wait_icr_idle();
 596		if (status)
 597			pr_cont("a previous APIC delivery may have failed\n");
 598
 599		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
 600
 601		timeout = 0;
 602		do {
 603			udelay(100);
 604			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
 605		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
 606
 607		switch (status) {
 608		case APIC_ICR_RR_VALID:
 609			status = apic_read(APIC_RRR);
 610			pr_cont("%08x\n", status);
 611			break;
 612		default:
 613			pr_cont("failed\n");
 614		}
 615	}
 616}
 617
 618/*
 619 * The Multiprocessor Specification 1.4 (1997) example code suggests
 620 * that there should be a 10ms delay between the BSP asserting INIT
 621 * and de-asserting INIT, when starting a remote processor.
 622 * But that slows boot and resume on modern processors, which include
 623 * many cores and don't require that delay.
 624 *
  625 * Cmdline "cpu_init_udelay=" is available to override this delay.
 626 * Modern processor families are quirked to remove the delay entirely.
 627 */
 628#define UDELAY_10MS_DEFAULT 10000
 629
 630static unsigned int init_udelay = UINT_MAX;
 631
 632static int __init cpu_init_udelay(char *str)
 633{
 634	get_option(&str, &init_udelay);
 635
 636	return 0;
 637}
 638early_param("cpu_init_udelay", cpu_init_udelay);
 639
 640static void __init smp_quirk_init_udelay(void)
 641{
 642	/* if cmdline changed it from default, leave it alone */
 643	if (init_udelay != UINT_MAX)
 644		return;
 645
 646	/* if modern processor, use no delay */
 647	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
 648	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
 649		init_udelay = 0;
 650		return;
 651	}
 652	/* else, use legacy delay */
 653	init_udelay = UDELAY_10MS_DEFAULT;
 654}
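
/*
 * For example, booting with "cpu_init_udelay=10000" restores the
 * MP-spec 10ms delay even on a quirked modern processor, while
 * "cpu_init_udelay=0" removes the delay everywhere.
 */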
 655
 656/*
 657 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
  658 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
  659 * won't, so remember to clear down the APIC, etc. later.
 660 */
 661int
 662wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
 663{
 664	unsigned long send_status, accept_status = 0;
 665	int maxlvt;
 666
  667	/* Target chip: kick the secondary CPU by sending it an NMI IPI */
 670	apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid);
 671
 672	pr_debug("Waiting for send to finish...\n");
 673	send_status = safe_apic_wait_icr_idle();
 674
 675	/*
 676	 * Give the other CPU some time to accept the IPI.
 677	 */
 678	udelay(200);
 679	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
 680		maxlvt = lapic_get_maxlvt();
 681		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
 682			apic_write(APIC_ESR, 0);
 683		accept_status = (apic_read(APIC_ESR) & 0xEF);
 684	}
 685	pr_debug("NMI sent\n");
 686
 687	if (send_status)
 688		pr_err("APIC never delivered???\n");
 689	if (accept_status)
 690		pr_err("APIC delivery error (%lx)\n", accept_status);
 691
 692	return (send_status | accept_status);
 693}
 694
 695static int
 696wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 697{
 698	unsigned long send_status = 0, accept_status = 0;
 699	int maxlvt, num_starts, j;
 700
 701	maxlvt = lapic_get_maxlvt();
 702
 703	/*
 704	 * Be paranoid about clearing APIC errors.
 705	 */
 706	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
 707		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 708			apic_write(APIC_ESR, 0);
 709		apic_read(APIC_ESR);
 710	}
 711
 712	pr_debug("Asserting INIT\n");
 713
  714	/*
  715	 * Turn INIT on the target chip: send the INIT-assert IPI.
  716	 */
 720	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
 721		       phys_apicid);
 722
 723	pr_debug("Waiting for send to finish...\n");
 724	send_status = safe_apic_wait_icr_idle();
 725
 726	udelay(init_udelay);
 727
 728	pr_debug("Deasserting INIT\n");
 729
  730	/* Target chip: send the INIT-deassert IPI */
 732	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
 733
 734	pr_debug("Waiting for send to finish...\n");
 735	send_status = safe_apic_wait_icr_idle();
 736
 737	mb();
 738
 739	/*
  740	 * Should we send STARTUP IPIs?
 741	 *
 742	 * Determine this based on the APIC version.
 743	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
 744	 */
 745	if (APIC_INTEGRATED(apic_version[phys_apicid]))
 746		num_starts = 2;
 747	else
 748		num_starts = 0;
 749
 750	/*
 751	 * Run STARTUP IPI loop.
 752	 */
 753	pr_debug("#startup loops: %d\n", num_starts);
 754
 755	for (j = 1; j <= num_starts; j++) {
 756		pr_debug("Sending STARTUP #%d\n", j);
 757		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 758			apic_write(APIC_ESR, 0);
 759		apic_read(APIC_ESR);
 760		pr_debug("After apic_write\n");
 761
 762		/*
 763		 * STARTUP IPI
 764		 */
 765
  766		/* Target chip: kick the secondary with the STARTUP vector */
 769		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
 770			       phys_apicid);
 771
 772		/*
 773		 * Give the other CPU some time to accept the IPI.
 774		 */
 775		if (init_udelay == 0)
 776			udelay(10);
 777		else
 778			udelay(300);
 779
 780		pr_debug("Startup point 1\n");
 781
 782		pr_debug("Waiting for send to finish...\n");
 783		send_status = safe_apic_wait_icr_idle();
 784
 785		/*
 786		 * Give the other CPU some time to accept the IPI.
 787		 */
 788		if (init_udelay == 0)
 789			udelay(10);
 790		else
 791			udelay(200);
 792
 793		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 794			apic_write(APIC_ESR, 0);
 795		accept_status = (apic_read(APIC_ESR) & 0xEF);
 796		if (send_status || accept_status)
 797			break;
 798	}
 799	pr_debug("After Startup\n");
 800
 801	if (send_status)
 802		pr_err("APIC never delivered???\n");
 803	if (accept_status)
 804		pr_err("APIC delivery error (%lx)\n", accept_status);
 805
 806	return (send_status | accept_status);
 807}
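
/*
 * The sequence above, condensed: assert INIT (level triggered), wait
 * init_udelay, de-assert INIT, then send up to two STARTUP IPIs whose
 * vector field is start_eip >> 12, i.e. the page number of the
 * real-mode trampoline the AP starts executing at.
 */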
 808
 809void smp_announce(void)
 810{
 811	int num_nodes = num_online_nodes();
 812
 813	printk(KERN_INFO "x86: Booted up %d node%s, %d CPUs\n",
 814	       num_nodes, (num_nodes > 1 ? "s" : ""), num_online_cpus());
 815}
 816
  817/* reduce the number of lines printed when booting a system with many CPUs */
 818static void announce_cpu(int cpu, int apicid)
 819{
 820	static int current_node = -1;
 821	int node = early_cpu_to_node(cpu);
 822	static int width, node_width;
 823
 824	if (!width)
 825		width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */
 826
 827	if (!node_width)
 828		node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */
 829
 830	if (cpu == 1)
 831		printk(KERN_INFO "x86: Booting SMP configuration:\n");
 832
 833	if (system_state == SYSTEM_BOOTING) {
 834		if (node != current_node) {
 835			if (current_node > (-1))
 836				pr_cont("\n");
 837			current_node = node;
 838
 839			printk(KERN_INFO ".... node %*s#%d, CPUs:  ",
 840			       node_width - num_digits(node), " ", node);
 841		}
 842
 843		/* Add padding for the BSP */
 844		if (cpu == 1)
 845			pr_cont("%*s", width + 1, " ");
 846
 847		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);
 848
 849	} else
 850		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
 851			node, cpu, apicid);
 852}
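
/*
 * Illustrative boot-time output of the above, for a hypothetical
 * two-node, eight-CPU box (exact spacing depends on the CPU and node
 * counts):
 *
 *	x86: Booting SMP configuration:
 *	.... node  #0, CPUs:        #1  #2  #3
 *	.... node  #1, CPUs:    #4  #5  #6  #7
 */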
 853
 854static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
 855{
 856	int cpu;
 857
 858	cpu = smp_processor_id();
 859	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
 860		return NMI_HANDLED;
 861
 862	return NMI_DONE;
 863}
 864
 865/*
 866 * Wake up AP by INIT, INIT, STARTUP sequence.
 867 *
  868 * Instead of waiting for STARTUP after INITs, the BSP would execute the BIOS
  869 * boot-strap code, which is not the desired behavior when waking up the BSP.
  870 * To avoid the boot-strap code, wake up CPU0 by NMI instead.
 871 *
 872 * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined
 873 * (i.e. physically hot removed and then hot added), NMI won't wake it up.
  874 * We'll change this code in the future to wake up hard offlined CPU0 if a
  875 * real platform and a request for it become available.
 876 */
 877static int
 878wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
 879	       int *cpu0_nmi_registered)
 880{
 881	int id;
 882	int boot_error;
 883
 884	preempt_disable();
 885
 886	/*
 887	 * Wake up AP by INIT, INIT, STARTUP sequence.
 888	 */
 889	if (cpu) {
 890		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
 891		goto out;
 892	}
 893
 894	/*
  895	 * Wake up the BSP by NMI.
  896	 *
  897	 * Register an NMI handler to help wake up CPU0.
 898	 */
 899	boot_error = register_nmi_handler(NMI_LOCAL,
 900					  wakeup_cpu0_nmi, 0, "wake_cpu0");
 901
 902	if (!boot_error) {
 903		enable_start_cpu0 = 1;
 904		*cpu0_nmi_registered = 1;
 905		if (apic->dest_logical == APIC_DEST_LOGICAL)
 906			id = cpu0_logical_apicid;
 907		else
 908			id = apicid;
 909		boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
 910	}
 911
 912out:
 913	preempt_enable();
 914
 915	return boot_error;
 916}
 917
 918void common_cpu_up(unsigned int cpu, struct task_struct *idle)
 919{
 920	/* Just in case we booted with a single CPU. */
 921	alternatives_enable_smp();
 922
 923	per_cpu(current_task, cpu) = idle;
 924
 925#ifdef CONFIG_X86_32
 926	/* Stack for startup_32 can be just as for start_secondary onwards */
 927	irq_ctx_init(cpu);
 928	per_cpu(cpu_current_top_of_stack, cpu) =
 929		(unsigned long)task_stack_page(idle) + THREAD_SIZE;
 930#else
 931	clear_tsk_thread_flag(idle, TIF_FORK);
 932	initial_gs = per_cpu_offset(cpu);
 933#endif
 934}
 935
 936/*
 937 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  938 * (i.e. clustered APIC addressing mode), this is a LOGICAL apic ID.
 939 * Returns zero if CPU booted OK, else error code from
 940 * ->wakeup_secondary_cpu.
 941 */
 942static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 943{
 944	volatile u32 *trampoline_status =
 945		(volatile u32 *) __va(real_mode_header->trampoline_status);
 946	/* start_ip had better be page-aligned! */
 947	unsigned long start_ip = real_mode_header->trampoline_start;
 948
 949	unsigned long boot_error = 0;
 950	int cpu0_nmi_registered = 0;
 951	unsigned long timeout;
 952
 953	idle->thread.sp = (unsigned long) (((struct pt_regs *)
 954			  (THREAD_SIZE +  task_stack_page(idle))) - 1);
 955
 956	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 957	initial_code = (unsigned long)start_secondary;
 958	stack_start  = idle->thread.sp;
 959
 960	/*
 961	 * Enable the espfix hack for this CPU
  962	 */
 963#ifdef CONFIG_X86_ESPFIX64
 964	init_espfix_ap(cpu);
 965#endif
 966
 967	/* So we see what's up */
 968	announce_cpu(cpu, apicid);
 969
 970	/*
 971	 * This grunge runs the startup process for
 972	 * the targeted processor.
 973	 */
 974
 975	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
 976
 977		pr_debug("Setting warm reset code and vector.\n");
 978
 979		smpboot_setup_warm_reset_vector(start_ip);
 980		/*
 981		 * Be paranoid about clearing APIC errors.
  982		 */
 983		if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
 984			apic_write(APIC_ESR, 0);
 985			apic_read(APIC_ESR);
 986		}
 987	}
 988
 989	/*
 990	 * AP might wait on cpu_callout_mask in cpu_init() with
 991	 * cpu_initialized_mask set if previous attempt to online
  992	 * it timed out. Clear cpu_initialized_mask so that after
  993	 * INIT/SIPI it can start with a clean state.
 994	 */
 995	cpumask_clear_cpu(cpu, cpu_initialized_mask);
 996	smp_mb();
 997
 998	/*
  999	 * Wake up a CPU in different cases:
 1000	 * - Use the method in the APIC driver if it's defined.
 1001	 * Otherwise,
 1002	 * - Use an INIT boot APIC message for APs, or an NMI for the BSP.
1003	 */
1004	if (apic->wakeup_secondary_cpu)
1005		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
1006	else
1007		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
1008						     &cpu0_nmi_registered);
1009
1010	if (!boot_error) {
1011		/*
 1012		 * Wait 10s total for a first sign of life from the AP
1013		 */
1014		boot_error = -1;
1015		timeout = jiffies + 10*HZ;
1016		while (time_before(jiffies, timeout)) {
1017			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
1018				/*
1019				 * Tell AP to proceed with initialization
1020				 */
1021				cpumask_set_cpu(cpu, cpu_callout_mask);
1022				boot_error = 0;
1023				break;
1024			}
1025			schedule();
1026		}
1027	}
1028
1029	if (!boot_error) {
1030		/*
 1031		 * Wait until the AP completes its initial initialization
1032		 */
1033		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
1034			/*
1035			 * Allow other tasks to run while we wait for the
1036			 * AP to come online. This also gives a chance
 1037			 * for the MTRR work (triggered by the AP coming online)
1038			 * to be completed in the stop machine context.
1039			 */
1040			schedule();
1041		}
1042	}
1043
1044	/* mark "stuck" area as not stuck */
1045	*trampoline_status = 0;
1046
1047	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
1048		/*
1049		 * Cleanup possible dangling ends...
1050		 */
1051		smpboot_restore_warm_reset_vector();
1052	}
1053	/*
 1054	 * Clean up the NMI handler. Do this after the callin and callout sync
1055	 * to avoid impact of possible long unregister time.
1056	 */
1057	if (cpu0_nmi_registered)
1058		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");
1059
1060	return boot_error;
1061}
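
/*
 * The bringup masks used above, in handshake order: the waker clears
 * cpu_initialized_mask and kicks the AP; cpu_init() on the AP sets its
 * bit in cpu_initialized_mask and waits; the waker answers by setting
 * cpu_callout_mask; the AP finishes smp_callin() and sets
 * cpu_callin_mask; finally start_secondary() marks the CPU online.
 */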
1062
1063int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
1064{
1065	int apicid = apic->cpu_present_to_apicid(cpu);
1066	unsigned long flags;
1067	int err;
1068
1069	WARN_ON(irqs_disabled());
1070
1071	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
1072
1073	if (apicid == BAD_APICID ||
1074	    !physid_isset(apicid, phys_cpu_present_map) ||
1075	    !apic->apic_id_valid(apicid)) {
1076		pr_err("%s: bad cpu %d\n", __func__, cpu);
1077		return -EINVAL;
1078	}
1079
1080	/*
1081	 * Already booted CPU?
1082	 */
1083	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
1084		pr_debug("do_boot_cpu %d Already started\n", cpu);
1085		return -ENOSYS;
1086	}
1087
1088	/*
1089	 * Save current MTRR state in case it was changed since early boot
1090	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
1091	 */
1092	mtrr_save_state();
1093
1094	/* x86 CPUs take themselves offline, so delayed offline is OK. */
1095	err = cpu_check_up_prepare(cpu);
1096	if (err && err != -EBUSY)
1097		return err;
1098
1099	/* the FPU context is blank, nobody can own it */
1100	__cpu_disable_lazy_restore(cpu);
1101
1102	common_cpu_up(cpu, tidle);
1103
1104	/*
 1105	 * We have to walk the irq descriptors to set up the vector
 1106	 * space for the CPU which comes online.  Prevent irq
1107	 * alloc/free across the bringup.
1108	 */
1109	irq_lock_sparse();
1110
1111	err = do_boot_cpu(apicid, cpu, tidle);
1112
1113	if (err) {
1114		irq_unlock_sparse();
1115		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
1116		return -EIO;
1117	}
1118
1119	/*
1120	 * Check TSC synchronization with the AP (keep irqs disabled
1121	 * while doing so):
1122	 */
1123	local_irq_save(flags);
1124	check_tsc_sync_source(cpu);
1125	local_irq_restore(flags);
1126
1127	while (!cpu_online(cpu)) {
1128		cpu_relax();
1129		touch_nmi_watchdog();
1130	}
1131
1132	irq_unlock_sparse();
1133
1134	return 0;
1135}
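
/*
 * A call-path sketch, under the usual smp_ops wiring: cpu_up(cpu) ends
 * up calling native_cpu_up(cpu, idle) with the per-CPU idle task, so
 * everything above runs in the context of the task requesting the
 * hotplug, not on the CPU being brought up.
 */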
1136
1137/**
1138 * arch_disable_smp_support() - disables SMP support for x86 at runtime
1139 */
1140void arch_disable_smp_support(void)
1141{
1142	disable_ioapic_support();
1143}
1144
1145/*
1146 * Fall back to non SMP mode after errors.
1147 *
1148 * RED-PEN audit/test this more. I bet there is more state messed up here.
1149 */
1150static __init void disable_smp(void)
1151{
1152	pr_info("SMP disabled\n");
1153
1154	disable_ioapic_support();
1155
1156	init_cpu_present(cpumask_of(0));
1157	init_cpu_possible(cpumask_of(0));
1158
1159	if (smp_found_config)
1160		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
1161	else
1162		physid_set_mask_of_physid(0, &phys_cpu_present_map);
1163	cpumask_set_cpu(0, topology_sibling_cpumask(0));
1164	cpumask_set_cpu(0, topology_core_cpumask(0));
1165}
1166
1167enum {
1168	SMP_OK,
1169	SMP_NO_CONFIG,
1170	SMP_NO_APIC,
1171	SMP_FORCE_UP,
1172};
1173
1174/*
1175 * Various sanity checks.
1176 */
1177static int __init smp_sanity_check(unsigned max_cpus)
1178{
1179	preempt_disable();
1180
1181#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
1182	if (def_to_bigsmp && nr_cpu_ids > 8) {
1183		unsigned int cpu;
1184		unsigned nr;
1185
1186		pr_warn("More than 8 CPUs detected - skipping them\n"
1187			"Use CONFIG_X86_BIGSMP\n");
1188
1189		nr = 0;
1190		for_each_present_cpu(cpu) {
1191			if (nr >= 8)
1192				set_cpu_present(cpu, false);
1193			nr++;
1194		}
1195
1196		nr = 0;
1197		for_each_possible_cpu(cpu) {
1198			if (nr >= 8)
1199				set_cpu_possible(cpu, false);
1200			nr++;
1201		}
1202
1203		nr_cpu_ids = 8;
1204	}
1205#endif
1206
1207	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
1208		pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
1209			hard_smp_processor_id());
1210
1211		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1212	}
1213
1214	/*
1215	 * If we couldn't find an SMP configuration at boot time,
1216	 * get out of here now!
1217	 */
1218	if (!smp_found_config && !acpi_lapic) {
1219		preempt_enable();
1220		pr_notice("SMP motherboard not detected\n");
1221		return SMP_NO_CONFIG;
1222	}
1223
1224	/*
1225	 * Should not be necessary because the MP table should list the boot
1226	 * CPU too, but we do it for the sake of robustness anyway.
1227	 */
1228	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
1229		pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
1230			  boot_cpu_physical_apicid);
1231		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1232	}
1233	preempt_enable();
1234
1235	/*
1236	 * If we couldn't find a local APIC, then get out of here now!
1237	 */
1238	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
1239	    !cpu_has_apic) {
1240		if (!disable_apic) {
1241			pr_err("BIOS bug, local APIC #%d not detected!...\n",
1242				boot_cpu_physical_apicid);
1243			pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n");
1244		}
1245		return SMP_NO_APIC;
1246	}
1247
1248	/*
1249	 * If SMP should be disabled, then really disable it!
1250	 */
1251	if (!max_cpus) {
1252		pr_info("SMP mode deactivated\n");
1253		return SMP_FORCE_UP;
1254	}
1255
1256	return SMP_OK;
1257}
1258
1259static void __init smp_cpu_index_default(void)
1260{
1261	int i;
1262	struct cpuinfo_x86 *c;
1263
1264	for_each_possible_cpu(i) {
1265		c = &cpu_data(i);
 1266		/* mark all as invalid until the CPU is actually brought up */
1267		c->cpu_index = nr_cpu_ids;
1268	}
1269}
1270
1271/*
1272 * Prepare for SMP bootup.  The MP table or ACPI has been read
1273 * earlier.  Just do some sanity checking here and enable APIC mode.
1274 */
1275void __init native_smp_prepare_cpus(unsigned int max_cpus)
1276{
1277	unsigned int i;
1278
1279	smp_cpu_index_default();
1280
1281	/*
1282	 * Setup boot CPU information
1283	 */
1284	smp_store_boot_cpu_info(); /* Final full version of the data */
1285	cpumask_copy(cpu_callin_mask, cpumask_of(0));
1286	mb();
1287
1288	current_thread_info()->cpu = 0;  /* needed? */
1289	for_each_possible_cpu(i) {
1290		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
1291		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
1292		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
1293	}
1294	set_cpu_sibling_map(0);
1295
1296	switch (smp_sanity_check(max_cpus)) {
1297	case SMP_NO_CONFIG:
1298		disable_smp();
1299		if (APIC_init_uniprocessor())
1300			pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
1301		return;
1302	case SMP_NO_APIC:
1303		disable_smp();
1304		return;
1305	case SMP_FORCE_UP:
1306		disable_smp();
1307		apic_bsp_setup(false);
1308		return;
1309	case SMP_OK:
1310		break;
1311	}
1312
1313	default_setup_apic_routing();
1314
1315	if (read_apic_id() != boot_cpu_physical_apicid) {
1316		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
1317		     read_apic_id(), boot_cpu_physical_apicid);
1318		/* Or can we switch back to PIC here? */
1319	}
1320
1321	cpu0_logical_apicid = apic_bsp_setup(false);
1322
1323	pr_info("CPU%d: ", 0);
1324	print_cpu_info(&cpu_data(0));
1325
1326	if (is_uv_system())
1327		uv_system_init();
1328
1329	set_mtrr_aps_delayed_init();
1330
1331	smp_quirk_init_udelay();
1332}
1333
1334void arch_enable_nonboot_cpus_begin(void)
1335{
1336	set_mtrr_aps_delayed_init();
1337}
1338
1339void arch_enable_nonboot_cpus_end(void)
1340{
1341	mtrr_aps_init();
1342}
1343
1344/*
1345 * Early setup to make printk work.
1346 */
1347void __init native_smp_prepare_boot_cpu(void)
1348{
1349	int me = smp_processor_id();
1350	switch_to_new_gdt(me);
1351	/* already set me in cpu_online_mask in boot_cpu_init() */
1352	cpumask_set_cpu(me, cpu_callout_mask);
1353	cpu_set_state_online(me);
1354}
1355
1356void __init native_smp_cpus_done(unsigned int max_cpus)
1357{
1358	pr_debug("Boot done\n");
1359
1360	nmi_selftest();
1361	impress_friends();
1362	setup_ioapic_dest();
1363	mtrr_aps_init();
1364}
1365
1366static int __initdata setup_possible_cpus = -1;
1367static int __init _setup_possible_cpus(char *str)
1368{
1369	get_option(&str, &setup_possible_cpus);
1370	return 0;
1371}
1372early_param("possible_cpus", _setup_possible_cpus);
1373
1374
1375/*
 1376 * cpu_possible_mask should be static: it cannot change as CPUs
 1377 * are onlined or offlined. The reason is that per-cpu data structures
 1378 * are allocated by some modules at init time, and they don't expect to
 1379 * do this dynamically on CPU arrival/departure.
 1380 * cpu_present_mask, on the other hand, can change dynamically.
 1381 * If cpu_hotplug is not compiled in, we resort to the current
 1382 * behaviour, which is cpu_possible == cpu_present.
1383 * - Ashok Raj
1384 *
1385 * Three ways to find out the number of additional hotplug CPUs:
1386 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
1387 * - The user can overwrite it with possible_cpus=NUM
1388 * - Otherwise don't reserve additional CPUs.
1389 * We do this because additional CPUs waste a lot of memory.
1390 * -AK
1391 */
1392__init void prefill_possible_map(void)
1393{
1394	int i, possible;
1395
1396	/* no processor from mptable or madt */
1397	if (!num_processors)
1398		num_processors = 1;
1399
1400	i = setup_max_cpus ?: 1;
1401	if (setup_possible_cpus == -1) {
1402		possible = num_processors;
1403#ifdef CONFIG_HOTPLUG_CPU
1404		if (setup_max_cpus)
1405			possible += disabled_cpus;
1406#else
1407		if (possible > i)
1408			possible = i;
1409#endif
1410	} else
1411		possible = setup_possible_cpus;
1412
1413	total_cpus = max_t(int, possible, num_processors + disabled_cpus);
1414
1415	/* nr_cpu_ids could be reduced via nr_cpus= */
1416	if (possible > nr_cpu_ids) {
1417		pr_warn("%d Processors exceeds NR_CPUS limit of %d\n",
1418			possible, nr_cpu_ids);
1419		possible = nr_cpu_ids;
1420	}
1421
1422#ifdef CONFIG_HOTPLUG_CPU
1423	if (!setup_max_cpus)
1424#endif
1425	if (possible > i) {
1426		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
1427			possible, setup_max_cpus);
1428		possible = i;
1429	}
1430
1431	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
1432		possible, max_t(int, possible - num_processors, 0));
1433
1434	for (i = 0; i < possible; i++)
1435		set_cpu_possible(i, true);
1436	for (; i < NR_CPUS; i++)
1437		set_cpu_possible(i, false);
1438
1439	nr_cpu_ids = possible;
1440}
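
/*
 * A worked example for a hypothetical box: num_processors == 4,
 * disabled_cpus == 2, CONFIG_HOTPLUG_CPU=y, and no possible_cpus= or
 * nr_cpus= override. Then possible == 4 + 2 == 6, total_cpus == 6, and
 * the log reads "Allowing 6 CPUs, 2 hotplug CPUs".
 */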
1441
1442#ifdef CONFIG_HOTPLUG_CPU
1443
1444static void remove_siblinginfo(int cpu)
1445{
1446	int sibling;
1447	struct cpuinfo_x86 *c = &cpu_data(cpu);
1448
1449	for_each_cpu(sibling, topology_core_cpumask(cpu)) {
1450		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
 1451		/*
 1452		 * last thread sibling in this CPU core going down
1453		 */
1454		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
1455			cpu_data(sibling).booted_cores--;
1456	}
1457
1458	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
1459		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
1460	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
1461		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
1462	cpumask_clear(cpu_llc_shared_mask(cpu));
1463	cpumask_clear(topology_sibling_cpumask(cpu));
1464	cpumask_clear(topology_core_cpumask(cpu));
1465	c->phys_proc_id = 0;
1466	c->cpu_core_id = 0;
1467	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
1468}
1469
1470static void remove_cpu_from_maps(int cpu)
1471{
1472	set_cpu_online(cpu, false);
1473	cpumask_clear_cpu(cpu, cpu_callout_mask);
1474	cpumask_clear_cpu(cpu, cpu_callin_mask);
1475	/* was set by cpu_init() */
1476	cpumask_clear_cpu(cpu, cpu_initialized_mask);
1477	numa_remove_cpu(cpu);
1478}
1479
1480void cpu_disable_common(void)
1481{
1482	int cpu = smp_processor_id();
1483
1484	remove_siblinginfo(cpu);
1485
1486	/* It's now safe to remove this processor from the online map */
1487	lock_vector_lock();
1488	remove_cpu_from_maps(cpu);
1489	unlock_vector_lock();
1490	fixup_irqs();
1491}
1492
1493int native_cpu_disable(void)
1494{
1495	int ret;
1496
1497	ret = check_irq_vectors_for_cpu_disable();
1498	if (ret)
1499		return ret;
1500
1501	clear_local_APIC();
1502	cpu_disable_common();
1503
1504	return 0;
1505}
1506
1507int common_cpu_die(unsigned int cpu)
1508{
1509	int ret = 0;
1510
1511	/* We don't do anything here: idle task is faking death itself. */
1512
1513	/* They ack this in play_dead() by setting CPU_DEAD */
1514	if (cpu_wait_death(cpu, 5)) {
1515		if (system_state == SYSTEM_RUNNING)
1516			pr_info("CPU %u is now offline\n", cpu);
1517	} else {
1518		pr_err("CPU %u didn't die...\n", cpu);
1519		ret = -1;
1520	}
1521
1522	return ret;
1523}
1524
1525void native_cpu_die(unsigned int cpu)
1526{
1527	common_cpu_die(cpu);
1528}
1529
1530void play_dead_common(void)
1531{
1532	idle_task_exit();
1533	reset_lazy_tlbstate();
1534	amd_e400_remove_cpu(raw_smp_processor_id());
1535
1536	/* Ack it */
1537	(void)cpu_report_death();
1538
1539	/*
 1540	 * With physical CPU hotplug, we should halt the CPU.
1541	 */
1542	local_irq_disable();
1543}
1544
1545static bool wakeup_cpu0(void)
1546{
1547	if (smp_processor_id() == 0 && enable_start_cpu0)
1548		return true;
1549
1550	return false;
1551}
1552
1553/*
1554 * We need to flush the caches before going to sleep, lest we have
1555 * dirty data in our caches when we come back up.
1556 */
1557static inline void mwait_play_dead(void)
1558{
1559	unsigned int eax, ebx, ecx, edx;
1560	unsigned int highest_cstate = 0;
1561	unsigned int highest_subcstate = 0;
1562	void *mwait_ptr;
1563	int i;
1564
1565	if (!this_cpu_has(X86_FEATURE_MWAIT))
1566		return;
1567	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
1568		return;
1569	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
1570		return;
1571
1572	eax = CPUID_MWAIT_LEAF;
1573	ecx = 0;
1574	native_cpuid(&eax, &ebx, &ecx, &edx);
1575
1576	/*
 1577	 * eax will be 0 if EDX enumeration is not valid;
 1578	 * it is initialized below to the (cstate, sub_cstate) value when EDX is valid.
1579	 */
1580	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
1581		eax = 0;
1582	} else {
1583		edx >>= MWAIT_SUBSTATE_SIZE;
1584		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
1585			if (edx & MWAIT_SUBSTATE_MASK) {
1586				highest_cstate = i;
1587				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
1588			}
1589		}
1590		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
1591			(highest_subcstate - 1);
1592	}
1593
1594	/*
1595	 * This should be a memory location in a cache line which is
1596	 * unlikely to be touched by other processors.  The actual
1597	 * content is immaterial as it is not actually modified in any way.
1598	 */
1599	mwait_ptr = &current_thread_info()->flags;
1600
1601	wbinvd();
1602
1603	while (1) {
1604		/*
1605		 * The CLFLUSH is a workaround for erratum AAI65 for
1606		 * the Xeon 7400 series.  It's not clear it is actually
1607		 * needed, but it should be harmless in either case.
1608		 * The WBINVD is insufficient due to the spurious-wakeup
1609		 * case where we return around the loop.
1610		 */
1611		mb();
1612		clflush(mwait_ptr);
1613		mb();
1614		__monitor(mwait_ptr, 0, 0);
1615		mb();
1616		__mwait(eax, 0);
1617		/*
1618		 * If NMI wants to wake up CPU0, start CPU0.
1619		 */
1620		if (wakeup_cpu0())
1621			start_cpu0();
1622	}
1623}
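
/*
 * A worked example of the MWAIT hint computed above, for a hypothetical
 * CPUID leaf 5 reporting edx == 0x2220 (two sub-states each for C1..C3,
 * with MWAIT_SUBSTATE_SIZE == 4): the loop ends with highest_cstate == 2
 * and highest_subcstate == 2, so eax == (2 << 4) | (2 - 1) == 0x21 and
 * MWAIT enters the deepest advertised C-state.
 */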
1624
1625static inline void hlt_play_dead(void)
1626{
1627	if (__this_cpu_read(cpu_info.x86) >= 4)
1628		wbinvd();
1629
1630	while (1) {
1631		native_halt();
1632		/*
1633		 * If NMI wants to wake up CPU0, start CPU0.
1634		 */
1635		if (wakeup_cpu0())
1636			start_cpu0();
1637	}
1638}
1639
1640void native_play_dead(void)
1641{
1642	play_dead_common();
1643	tboot_shutdown(TB_SHUTDOWN_WFS);
1644
1645	mwait_play_dead();	/* Only returns on failure */
1646	if (cpuidle_play_dead())
1647		hlt_play_dead();
1648}
1649
1650#else /* ... !CONFIG_HOTPLUG_CPU */
1651int native_cpu_disable(void)
1652{
1653	return -ENOSYS;
1654}
1655
1656void native_cpu_die(unsigned int cpu)
1657{
1658	/* We said "no" in __cpu_disable */
1659	BUG();
1660}
1661
1662void native_play_dead(void)
1663{
1664	BUG();
1665}
1666
1667#endif