v3.5.6
   1/*
   2 *	x86 SMP booting functions
   3 *
   4 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
   5 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
   6 *	Copyright 2001 Andi Kleen, SuSE Labs.
   7 *
   8 *	Much of the core SMP work is based on previous work by Thomas Radke, to
   9 *	whom a great many thanks are extended.
  10 *
  11 *	Thanks to Intel for making available several different Pentium,
  12 *	Pentium Pro and Pentium-II/Xeon MP machines.
  13 *	Original development of Linux SMP code supported by Caldera.
  14 *
  15 *	This code is released under the GNU General Public License version 2 or
  16 *	later.
  17 *
  18 *	Fixes
  19 *		Felix Koop	:	NR_CPUS used properly
  20 *		Jose Renau	:	Handle single CPU case.
  21 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
  22 *		Greg Wright	:	Fix for kernel stacks panic.
  23 *		Erich Boleyn	:	MP v1.4 and additional changes.
  24 *	Matthias Sattler	:	Changes for 2.1 kernel map.
  25 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
  26 *	Michael Chastain	:	Change trampoline.S to gnu as.
  27 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
  28 *		Ingo Molnar	:	Added APIC timers, based on code
  29 *					from Jose Renau
  30 *		Ingo Molnar	:	various cleanups and rewrites
  31 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
  32 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
  33 *	Andi Kleen		:	Changed for SMP boot into long mode.
  34 *		Martin J. Bligh	: 	Added support for multi-quad systems
  35 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
  36 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
  37 *      Andi Kleen              :       Converted to new state machine.
  38 *	Ashok Raj		: 	CPU hotplug support
  39 *	Glauber Costa		:	i386 and x86_64 integration
  40 */
  41
  42#include <linux/init.h>
  43#include <linux/smp.h>
  44#include <linux/module.h>
  45#include <linux/sched.h>
  46#include <linux/percpu.h>
  47#include <linux/bootmem.h>
  48#include <linux/err.h>
  49#include <linux/nmi.h>
  50#include <linux/tboot.h>
  51#include <linux/stackprotector.h>
  52#include <linux/gfp.h>
  53#include <linux/cpuidle.h>
  54
  55#include <asm/acpi.h>
  56#include <asm/desc.h>
  57#include <asm/nmi.h>
  58#include <asm/irq.h>
  59#include <asm/idle.h>
  60#include <asm/realmode.h>
  61#include <asm/cpu.h>
  62#include <asm/numa.h>
  63#include <asm/pgtable.h>
  64#include <asm/tlbflush.h>
  65#include <asm/mtrr.h>
  66#include <asm/mwait.h>
  67#include <asm/apic.h>
  68#include <asm/io_apic.h>
  69#include <asm/setup.h>
  70#include <asm/uv/uv.h>
  71#include <linux/mc146818rtc.h>
  72
  73#include <asm/smpboot_hooks.h>
  74#include <asm/i8259.h>
  75
  76#include <asm/realmode.h>
  77
  78/* State of each CPU */
  79DEFINE_PER_CPU(int, cpu_state) = { 0 };
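     /*
      * Values used in this file: CPU_UP_PREPARE while do_boot_cpu() runs,
      * CPU_ONLINE once start_secondary() completes, and CPU_DEAD, set by
      * play_dead_common() when the CPU goes offline.
      */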
   80
  81#ifdef CONFIG_HOTPLUG_CPU
   82/*
  83 * We need this for trampoline_base protection from concurrent accesses when
  84 * off- and onlining cores wildly.
  85 */
  86static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
  87
  88void cpu_hotplug_driver_lock(void)
  89{
  90	mutex_lock(&x86_cpu_hotplug_driver_mutex);
  91}
  92
  93void cpu_hotplug_driver_unlock(void)
  94{
  95	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
  96}
  97
  98ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
   99ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 100#endif
 101
 102/* Number of siblings per CPU package */
 103int smp_num_siblings = 1;
 104EXPORT_SYMBOL(smp_num_siblings);
 105
 106/* Last level cache ID of each logical CPU */
 107DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 108
 109/* representing HT siblings of each logical CPU */
 110DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
 111EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 112
 113/* representing HT and core siblings of each logical CPU */
 114DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 115EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 116
 117DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
 118
 119/* Per CPU bogomips and other parameters */
 120DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 121EXPORT_PER_CPU_SYMBOL(cpu_info);
 122
 123atomic_t init_deasserted;
 124
 125/*
 126 * Report back to the Boot Processor.
 127 * Running on AP.
 128 */
 129static void __cpuinit smp_callin(void)
 130{
 131	int cpuid, phys_id;
 132	unsigned long timeout;
 133
 134	/*
  135	 * If woken up by an INIT in an 82489DX configuration
 136	 * we may get here before an INIT-deassert IPI reaches
 137	 * our local APIC.  We have to wait for the IPI or we'll
 138	 * lock up on an APIC access.
 139	 */
 140	if (apic->wait_for_init_deassert)
 141		apic->wait_for_init_deassert(&init_deasserted);
 142
 143	/*
 144	 * (This works even if the APIC is not enabled.)
 145	 */
 146	phys_id = read_apic_id();
 147	cpuid = smp_processor_id();
 148	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
 149		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
 150					phys_id, cpuid);
 151	}
 152	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
 153
 154	/*
 155	 * STARTUP IPIs are fragile beasts as they might sometimes
 156	 * trigger some glue motherboard logic. Complete APIC bus
  157	 * silence for 1 second; this overestimates the time the
  158	 * boot CPU spends sending the up to 2 STARTUP IPIs
 159	 * by a factor of two. This should be enough.
 160	 */
 161
 162	/*
 163	 * Waiting 2s total for startup (udelay is not yet working)
 164	 */
 165	timeout = jiffies + 2*HZ;
 166	while (time_before(jiffies, timeout)) {
 167		/*
  168		 * Has the boot CPU finished its STARTUP sequence?
 169		 */
 170		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
 171			break;
 172		cpu_relax();
 173	}
 174
 175	if (!time_before(jiffies, timeout)) {
 176		panic("%s: CPU%d started up but did not get a callout!\n",
 177		      __func__, cpuid);
 178	}
 179
 180	/*
 181	 * the boot CPU has finished the init stage and is spinning
 182	 * on callin_map until we finish. We are free to set up this
 183	 * CPU, first the APIC. (this is probably redundant on most
 184	 * boards)
 185	 */
 186
 187	pr_debug("CALLIN, before setup_local_APIC().\n");
 188	if (apic->smp_callin_clear_local_apic)
 189		apic->smp_callin_clear_local_apic();
 190	setup_local_APIC();
 191	end_local_APIC_setup();
 192
 193	/*
 194	 * Need to setup vector mappings before we enable interrupts.
 195	 */
 196	setup_vector_irq(smp_processor_id());
 197
 198	/*
 199	 * Save our processor parameters. Note: this information
 200	 * is needed for clock calibration.
 201	 */
 202	smp_store_cpu_info(cpuid);
 203
 204	/*
 205	 * Get our bogomips.
 206	 * Update loops_per_jiffy in cpu_data. Previous call to
 207	 * smp_store_cpu_info() stored a value that is close but not as
 208	 * accurate as the value just calculated.
  209	 */
 210	calibrate_delay();
 211	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
 212	pr_debug("Stack at about %p\n", &cpuid);
 213
  214	/*
 215	 * This must be done before setting cpu_online_mask
 216	 * or calling notify_cpu_starting.
 217	 */
 218	set_cpu_sibling_map(raw_smp_processor_id());
 219	wmb();
 220
 221	notify_cpu_starting(cpuid);
 222
 223	/*
 224	 * Allow the master to continue.
 225	 */
 226	cpumask_set_cpu(cpuid, cpu_callin_mask);
 227}
 228
 229/*
 230 * Activate a secondary processor.
 231 */
 232notrace static void __cpuinit start_secondary(void *unused)
 233{
 234	/*
  235	 * Don't put *anything* before cpu_init(); SMP booting is so
  236	 * fragile that we want to limit the things done here to the
  237	 * most necessary ones.
 238	 */
 239	cpu_init();
 240	x86_cpuinit.early_percpu_clock_init();
 241	preempt_disable();
 242	smp_callin();
 243
 244#ifdef CONFIG_X86_32
 245	/* switch away from the initial page table */
 246	load_cr3(swapper_pg_dir);
 247	__flush_tlb_all();
 248#endif
 249
 250	/* otherwise gcc will move up smp_processor_id before the cpu_init */
 251	barrier();
 252	/*
 253	 * Check TSC synchronization with the BP:
 254	 */
 255	check_tsc_sync_target();
 256
 257	/*
 258	 * We need to hold call_lock, so there is no inconsistency
  259	 * between the time smp_call_function() determines the number of
 260	 * IPI recipients, and the time when the determination is made
 261	 * for which cpus receive the IPI. Holding this
 262	 * lock helps us to not include this cpu in a currently in progress
 263	 * smp_call_function().
 264	 *
  265	 * We need to hold vector_lock so that the set of online cpus
 266	 * does not change while we are assigning vectors to cpus.  Holding
 267	 * this lock ensures we don't half assign or remove an irq from a cpu.
 268	 */
 269	ipi_call_lock();
 270	lock_vector_lock();
 271	set_cpu_online(smp_processor_id(), true);
 272	unlock_vector_lock();
 273	ipi_call_unlock();
 274	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 275	x86_platform.nmi_init();
  276
 277	/* enable local interrupts */
 278	local_irq_enable();
 279
 280	/* to prevent fake stack check failure in clock setup */
 281	boot_init_stack_canary();
 282
 283	x86_cpuinit.setup_percpu_clockev();
 284
 285	wmb();
 286	cpu_idle();
 287}
 288
 289/*
 290 * The bootstrap kernel entry code has set these up. Save them for
 291 * a given CPU
 292 */
 293
 294void __cpuinit smp_store_cpu_info(int id)
 295{
 296	struct cpuinfo_x86 *c = &cpu_data(id);
 297
 298	*c = boot_cpu_data;
 299	c->cpu_index = id;
 300	if (id != 0)
 301		identify_secondary_cpu(c);
 302}
 303
 304static bool __cpuinit
 305topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
 306{
 307	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 308
 309	return !WARN_ONCE(cpu_to_node(cpu1) != cpu_to_node(cpu2),
 310		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
 311		"[node: %d != %d]. Ignoring dependency.\n",
 312		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
 313}
 314
 315#define link_mask(_m, c1, c2)						\
 316do {									\
 317	cpumask_set_cpu((c1), cpu_##_m##_mask(c2));			\
 318	cpumask_set_cpu((c2), cpu_##_m##_mask(c1));			\
 319} while (0)
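     /*
      * The _m argument is token-pasted into the accessor name, e.g.
      * link_mask(sibling, 2, 3) expands to
      *	cpumask_set_cpu(2, cpu_sibling_mask(3));
      *	cpumask_set_cpu(3, cpu_sibling_mask(2));
      * i.e. it links the two CPUs in each other's sibling map.
      */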
 320
 321static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 322{
 323	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
 324		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 325
 326		if (c->phys_proc_id == o->phys_proc_id &&
 327		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
 328		    c->compute_unit_id == o->compute_unit_id)
 329			return topology_sane(c, o, "smt");
 330
 331	} else if (c->phys_proc_id == o->phys_proc_id &&
 332		   c->cpu_core_id == o->cpu_core_id) {
 333		return topology_sane(c, o, "smt");
 334	}
 335
 336	return false;
 337}
 338
 339static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 340{
 341	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 342
 343	if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
 344	    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
 345		return topology_sane(c, o, "llc");
 346
 347	return false;
 348}
 349
 350static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 351{
 352	if (c->phys_proc_id == o->phys_proc_id) {
 353		if (cpu_has(c, X86_FEATURE_AMD_DCM))
 354			return true;
 355
 356		return topology_sane(c, o, "mc");
 357	}
 358	return false;
 359}
 360
 361void __cpuinit set_cpu_sibling_map(int cpu)
 362{
 363	bool has_mc = boot_cpu_data.x86_max_cores > 1;
 364	bool has_smt = smp_num_siblings > 1;
 365	struct cpuinfo_x86 *c = &cpu_data(cpu);
 366	struct cpuinfo_x86 *o;
  367	int i;
 368
 369	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 370
  371	if (!has_smt && !has_mc) {
 372		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 373		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 374		cpumask_set_cpu(cpu, cpu_core_mask(cpu));
 375		c->booted_cores = 1;
 376		return;
 377	}
 378
 379	for_each_cpu(i, cpu_sibling_setup_mask) {
 380		o = &cpu_data(i);
 381
 382		if ((i == cpu) || (has_smt && match_smt(c, o)))
 383			link_mask(sibling, cpu, i);
 384
 385		if ((i == cpu) || (has_mc && match_llc(c, o)))
 386			link_mask(llc_shared, cpu, i);
  387
 388	}
 389
 390	/*
 391	 * This needs a separate iteration over the cpus because we rely on all
 392	 * cpu_sibling_mask links to be set-up.
 393	 */
 394	for_each_cpu(i, cpu_sibling_setup_mask) {
 395		o = &cpu_data(i);
 396
 397		if ((i == cpu) || (has_mc && match_mc(c, o))) {
 398			link_mask(core, cpu, i);
  399
 400			/*
  401			 *  Does this new cpu bring up a new core?
 402			 */
 403			if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
 404				/*
 405				 * for each core in package, increment
 406				 * the booted_cores for this new cpu
 407				 */
 408				if (cpumask_first(cpu_sibling_mask(i)) == i)
 409					c->booted_cores++;
 410				/*
 411				 * increment the core count for all
 412				 * the other cpus in this package
 413				 */
 414				if (i != cpu)
 415					cpu_data(i).booted_cores++;
 416			} else if (i != cpu && !c->booted_cores)
 417				c->booted_cores = cpu_data(i).booted_cores;
 418		}
 419	}
 420}
 421
 422/* maps the cpu to the sched domain representing multi-core */
 423const struct cpumask *cpu_coregroup_mask(int cpu)
 424{
  425	return cpu_llc_shared_mask(cpu);
 426}
 427
 428static void impress_friends(void)
 429{
 430	int cpu;
 431	unsigned long bogosum = 0;
 432	/*
 433	 * Allow the user to impress friends.
 434	 */
 435	pr_debug("Before bogomips.\n");
 436	for_each_possible_cpu(cpu)
 437		if (cpumask_test_cpu(cpu, cpu_callout_mask))
 438			bogosum += cpu_data(cpu).loops_per_jiffy;
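     	/*
     	 * One BogoMIPS corresponds to 500000 delay-loop iterations per
     	 * second, so the integer part printed below is
     	 * bogosum * HZ / 500000 and the two decimals are
     	 * (bogosum * HZ / 5000) % 100.
     	 */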
 439	printk(KERN_INFO
 440		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 441		num_online_cpus(),
 442		bogosum/(500000/HZ),
 443		(bogosum/(5000/HZ))%100);
 444
 445	pr_debug("Before bogocount - setting activated=1.\n");
 446}
 447
 448void __inquire_remote_apic(int apicid)
 449{
 450	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
 451	const char * const names[] = { "ID", "VERSION", "SPIV" };
 452	int timeout;
 453	u32 status;
 454
 455	printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);
 456
 457	for (i = 0; i < ARRAY_SIZE(regs); i++) {
 458		printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);
 459
 460		/*
 461		 * Wait for idle.
 462		 */
 463		status = safe_apic_wait_icr_idle();
 464		if (status)
 465			printk(KERN_CONT
 466			       "a previous APIC delivery may have failed\n");
 467
 468		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
 469
 470		timeout = 0;
 471		do {
 472			udelay(100);
 473			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
 474		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
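     		/*
     		 * Poll the remote-read status bits of the ICR; RR_VALID
     		 * means the target has latched the requested register
     		 * into our remote-read register (APIC_RRR), fetched below.
     		 */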
 475
 476		switch (status) {
 477		case APIC_ICR_RR_VALID:
 478			status = apic_read(APIC_RRR);
 479			printk(KERN_CONT "%08x\n", status);
 480			break;
 481		default:
 482			printk(KERN_CONT "failed\n");
 483		}
 484	}
 485}
 486
 487/*
 488 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 489 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
  490 * won't ... remember to clear down the APIC, etc. later.
 491 */
 492int __cpuinit
 493wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
 494{
 495	unsigned long send_status, accept_status = 0;
 496	int maxlvt;
 497
 498	/* Target chip */
 499	/* Boot on the stack */
 500	/* Kick the second */
 501	apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid);
 502
 503	pr_debug("Waiting for send to finish...\n");
 504	send_status = safe_apic_wait_icr_idle();
 505
 506	/*
 507	 * Give the other CPU some time to accept the IPI.
 508	 */
 509	udelay(200);
 510	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
 511		maxlvt = lapic_get_maxlvt();
 512		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
 513			apic_write(APIC_ESR, 0);
 514		accept_status = (apic_read(APIC_ESR) & 0xEF);
 515	}
 516	pr_debug("NMI sent.\n");
 517
 518	if (send_status)
 519		printk(KERN_ERR "APIC never delivered???\n");
 520	if (accept_status)
 521		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
 522
 523	return (send_status | accept_status);
 524}
 525
 526static int __cpuinit
 527wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 528{
 529	unsigned long send_status, accept_status = 0;
 530	int maxlvt, num_starts, j;
 531
 532	maxlvt = lapic_get_maxlvt();
 533
 534	/*
 535	 * Be paranoid about clearing APIC errors.
 536	 */
 537	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
 538		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 539			apic_write(APIC_ESR, 0);
 540		apic_read(APIC_ESR);
 541	}
 542
 543	pr_debug("Asserting INIT.\n");
 544
 545	/*
 546	 * Turn INIT on target chip
 547	 */
 548	/*
 549	 * Send IPI
 550	 */
 551	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
 552		       phys_apicid);
 553
 554	pr_debug("Waiting for send to finish...\n");
 555	send_status = safe_apic_wait_icr_idle();
 556
 557	mdelay(10);
 558
 559	pr_debug("Deasserting INIT.\n");
 560
 561	/* Target chip */
 562	/* Send IPI */
 563	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
 564
 565	pr_debug("Waiting for send to finish...\n");
 566	send_status = safe_apic_wait_icr_idle();
 567
 568	mb();
 569	atomic_set(&init_deasserted, 1);
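     	/*
     	 * The AP may be spinning on init_deasserted in smp_callin() via
     	 * apic->wait_for_init_deassert(); the mb() above orders the ICR
     	 * writes before this release.
     	 */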
 570
 571	/*
 572	 * Should we send STARTUP IPIs ?
 573	 *
 574	 * Determine this based on the APIC version.
 575	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
 576	 */
 577	if (APIC_INTEGRATED(apic_version[phys_apicid]))
 578		num_starts = 2;
 579	else
 580		num_starts = 0;
 581
 582	/*
 583	 * Paravirt / VMI wants a startup IPI hook here to set up the
 584	 * target processor state.
 585	 */
 586	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
 587			 stack_start);
 588
 589	/*
 590	 * Run STARTUP IPI loop.
 591	 */
 592	pr_debug("#startup loops: %d.\n", num_starts);
 593
 594	for (j = 1; j <= num_starts; j++) {
 595		pr_debug("Sending STARTUP #%d.\n", j);
 596		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 597			apic_write(APIC_ESR, 0);
 598		apic_read(APIC_ESR);
 599		pr_debug("After apic_write.\n");
 600
 601		/*
 602		 * STARTUP IPI
 603		 */
 604
 605		/* Target chip */
 606		/* Boot on the stack */
 607		/* Kick the second */
 608		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
 609			       phys_apicid);
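     		/*
     		 * The STARTUP IPI's 8-bit vector field carries the
     		 * physical page number of the entry point, which is why
     		 * start_eip is shifted right by 12 and must be a
     		 * 4K-aligned address below 1MB.
     		 */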
 610
 611		/*
 612		 * Give the other CPU some time to accept the IPI.
 613		 */
 614		udelay(300);
 615
 616		pr_debug("Startup point 1.\n");
 617
 618		pr_debug("Waiting for send to finish...\n");
 619		send_status = safe_apic_wait_icr_idle();
 620
 621		/*
 622		 * Give the other CPU some time to accept the IPI.
 623		 */
 624		udelay(200);
 625		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 626			apic_write(APIC_ESR, 0);
 627		accept_status = (apic_read(APIC_ESR) & 0xEF);
 628		if (send_status || accept_status)
 629			break;
 630	}
 631	pr_debug("After Startup.\n");
 632
 633	if (send_status)
 634		printk(KERN_ERR "APIC never delivered???\n");
 635	if (accept_status)
 636		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
 637
 638	return (send_status | accept_status);
 639}
  640
 641/* reduce the number of lines printed when booting a large cpu count system */
 642static void __cpuinit announce_cpu(int cpu, int apicid)
 643{
 644	static int current_node = -1;
 645	int node = early_cpu_to_node(cpu);
 646
 647	if (system_state == SYSTEM_BOOTING) {
 648		if (node != current_node) {
 649			if (current_node > (-1))
 650				pr_cont(" Ok.\n");
 651			current_node = node;
 652			pr_info("Booting Node %3d, Processors ", node);
 653		}
 654		pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
 655		return;
 656	} else
 657		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
 658			node, cpu, apicid);
 659}
 660
 661/*
 662 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  663 * (i.e. clustered apic addressing mode), this is a LOGICAL apic ID.
 664 * Returns zero if CPU booted OK, else error code from
 665 * ->wakeup_secondary_cpu.
 666 */
 667static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 668{
 669	volatile u32 *trampoline_status =
 670		(volatile u32 *) __va(real_mode_header->trampoline_status);
 671	/* start_ip had better be page-aligned! */
 672	unsigned long start_ip = real_mode_header->trampoline_start;
 673
  674	unsigned long boot_error = 0;
  675	int timeout;
 676
 677	alternatives_smp_switch(1);
 678
 679	idle->thread.sp = (unsigned long) (((struct pt_regs *)
 680			  (THREAD_SIZE +  task_stack_page(idle))) - 1);
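     	/*
     	 * I.e. the top of the idle task's stack, minus room for one
     	 * struct pt_regs frame.
     	 */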
  681	per_cpu(current_task, cpu) = idle;
  682
 683#ifdef CONFIG_X86_32
 684	/* Stack for startup_32 can be just as for start_secondary onwards */
 685	irq_ctx_init(cpu);
 686#else
 687	clear_tsk_thread_flag(idle, TIF_FORK);
 688	initial_gs = per_cpu_offset(cpu);
 689	per_cpu(kernel_stack, cpu) =
 690		(unsigned long)task_stack_page(idle) -
 691		KERNEL_STACK_OFFSET + THREAD_SIZE;
 692#endif
 693	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 694	initial_code = (unsigned long)start_secondary;
  695	stack_start  = idle->thread.sp;
 696
 697	/* So we see what's up */
 698	announce_cpu(cpu, apicid);
 699
 700	/*
 701	 * This grunge runs the startup process for
 702	 * the targeted processor.
 703	 */
  704
 705	atomic_set(&init_deasserted, 0);
 706
 707	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
 708
 709		pr_debug("Setting warm reset code and vector.\n");
 710
 711		smpboot_setup_warm_reset_vector(start_ip);
 712		/*
 713		 * Be paranoid about clearing APIC errors.
  714		 */
 715		if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
 716			apic_write(APIC_ESR, 0);
 717			apic_read(APIC_ESR);
 718		}
 719	}
 720
 721	/*
 722	 * Kick the secondary CPU. Use the method in the APIC driver
 723	 * if it's defined - or use an INIT boot APIC message otherwise:
 724	 */
 725	if (apic->wakeup_secondary_cpu)
 726		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
 727	else
 728		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
 729
 730	if (!boot_error) {
 731		/*
 732		 * allow APs to start initializing.
 733		 */
 734		pr_debug("Before Callout %d.\n", cpu);
 735		cpumask_set_cpu(cpu, cpu_callout_mask);
 736		pr_debug("After Callout %d.\n", cpu);
 737
 738		/*
 739		 * Wait 5s total for a response
 740		 */
 741		for (timeout = 0; timeout < 50000; timeout++) {
 742			if (cpumask_test_cpu(cpu, cpu_callin_mask))
 743				break;	/* It has booted */
 744			udelay(100);
 745			/*
 746			 * Allow other tasks to run while we wait for the
 747			 * AP to come online. This also gives a chance
  748			 * for the MTRR work (triggered by the AP coming online)
 749			 * to be completed in the stop machine context.
 750			 */
 751			schedule();
 752		}
 753
 754		if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
 755			print_cpu_msr(&cpu_data(cpu));
 756			pr_debug("CPU%d: has booted.\n", cpu);
 757		} else {
 758			boot_error = 1;
  759			if (*trampoline_status == 0xA5A5A5A5)
 760				/* trampoline started but...? */
 761				pr_err("CPU%d: Stuck ??\n", cpu);
 762			else
 763				/* trampoline code not run */
 764				pr_err("CPU%d: Not responding.\n", cpu);
 765			if (apic->inquire_remote_apic)
 766				apic->inquire_remote_apic(apicid);
 767		}
 768	}
 769
 770	if (boot_error) {
 771		/* Try to put things back the way they were before ... */
 772		numa_remove_cpu(cpu); /* was set by numa_add_cpu */
 773
 774		/* was set by do_boot_cpu() */
 775		cpumask_clear_cpu(cpu, cpu_callout_mask);
 776
 777		/* was set by cpu_init() */
 778		cpumask_clear_cpu(cpu, cpu_initialized_mask);
 779
 780		set_cpu_present(cpu, false);
 781		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
 782	}
 783
 784	/* mark "stuck" area as not stuck */
 785	*trampoline_status = 0;
 786
 787	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
 788		/*
 789		 * Cleanup possible dangling ends...
 790		 */
 791		smpboot_restore_warm_reset_vector();
  792	}
 793	return boot_error;
 794}
 795
 796int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 797{
 798	int apicid = apic->cpu_present_to_apicid(cpu);
 799	unsigned long flags;
 800	int err;
 801
 802	WARN_ON(irqs_disabled());
 803
 804	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
 805
 806	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
 807	    !physid_isset(apicid, phys_cpu_present_map) ||
 808	    !apic->apic_id_valid(apicid)) {
 809		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
 810		return -EINVAL;
 811	}
 812
 813	/*
 814	 * Already booted CPU?
 815	 */
 816	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
 817		pr_debug("do_boot_cpu %d Already started\n", cpu);
 818		return -ENOSYS;
 819	}
 820
 821	/*
 822	 * Save current MTRR state in case it was changed since early boot
 823	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
 824	 */
 825	mtrr_save_state();
 826
 827	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 828
 829	err = do_boot_cpu(apicid, cpu, tidle);
 830	if (err) {
 831		pr_debug("do_boot_cpu failed %d\n", err);
 832		return -EIO;
 833	}
 834
 835	/*
 836	 * Check TSC synchronization with the AP (keep irqs disabled
 837	 * while doing so):
 838	 */
 839	local_irq_save(flags);
 840	check_tsc_sync_source(cpu);
 841	local_irq_restore(flags);
 842
 843	while (!cpu_online(cpu)) {
 844		cpu_relax();
 845		touch_nmi_watchdog();
 846	}
 847
 848	return 0;
 849}
 850
 851/**
 852 * arch_disable_smp_support() - disables SMP support for x86 at runtime
 853 */
 854void arch_disable_smp_support(void)
 855{
 856	disable_ioapic_support();
 857}
 858
 859/*
  860 * Fall back to non-SMP mode after errors.
 861 *
 862 * RED-PEN audit/test this more. I bet there is more state messed up here.
 863 */
 864static __init void disable_smp(void)
 865{
 866	init_cpu_present(cpumask_of(0));
 867	init_cpu_possible(cpumask_of(0));
 868	smpboot_clear_io_apic_irqs();
 869
 870	if (smp_found_config)
 871		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
 872	else
 873		physid_set_mask_of_physid(0, &phys_cpu_present_map);
 874	cpumask_set_cpu(0, cpu_sibling_mask(0));
 875	cpumask_set_cpu(0, cpu_core_mask(0));
 876}
 877
 878/*
 879 * Various sanity checks.
 880 */
 881static int __init smp_sanity_check(unsigned max_cpus)
 882{
 883	preempt_disable();
 884
 885#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
 886	if (def_to_bigsmp && nr_cpu_ids > 8) {
 887		unsigned int cpu;
 888		unsigned nr;
 889
 890		printk(KERN_WARNING
 891		       "More than 8 CPUs detected - skipping them.\n"
 892		       "Use CONFIG_X86_BIGSMP.\n");
 893
 894		nr = 0;
 895		for_each_present_cpu(cpu) {
 896			if (nr >= 8)
 897				set_cpu_present(cpu, false);
 898			nr++;
 899		}
 900
 901		nr = 0;
 902		for_each_possible_cpu(cpu) {
 903			if (nr >= 8)
 904				set_cpu_possible(cpu, false);
 905			nr++;
 906		}
 907
 908		nr_cpu_ids = 8;
 909	}
 910#endif
 911
 912	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
 913		printk(KERN_WARNING
 914			"weird, boot CPU (#%d) not listed by the BIOS.\n",
 915			hard_smp_processor_id());
 916
 917		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
 918	}
 919
 920	/*
 921	 * If we couldn't find an SMP configuration at boot time,
 922	 * get out of here now!
 923	 */
 924	if (!smp_found_config && !acpi_lapic) {
 925		preempt_enable();
 926		printk(KERN_NOTICE "SMP motherboard not detected.\n");
 927		disable_smp();
 928		if (APIC_init_uniprocessor())
 929			printk(KERN_NOTICE "Local APIC not detected."
 930					   " Using dummy APIC emulation.\n");
 931		return -1;
 932	}
 933
 934	/*
 935	 * Should not be necessary because the MP table should list the boot
 936	 * CPU too, but we do it for the sake of robustness anyway.
 937	 */
 938	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
 939		printk(KERN_NOTICE
 940			"weird, boot CPU (#%d) not listed by the BIOS.\n",
 941			boot_cpu_physical_apicid);
 942		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
 943	}
 944	preempt_enable();
 945
 946	/*
 947	 * If we couldn't find a local APIC, then get out of here now!
 948	 */
 949	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
 950	    !cpu_has_apic) {
 951		if (!disable_apic) {
 952			pr_err("BIOS bug, local APIC #%d not detected!...\n",
 953				boot_cpu_physical_apicid);
  954			pr_err("... forcing use of dummy APIC emulation. "
 955				"(tell your hw vendor)\n");
 956		}
 957		smpboot_clear_io_apic();
 958		disable_ioapic_support();
 959		return -1;
 960	}
 961
 962	verify_local_APIC();
 963
 964	/*
 965	 * If SMP should be disabled, then really disable it!
 966	 */
 967	if (!max_cpus) {
 968		printk(KERN_INFO "SMP mode deactivated.\n");
 969		smpboot_clear_io_apic();
 970
 971		connect_bsp_APIC();
 972		setup_local_APIC();
 973		bsp_end_local_APIC_setup();
 974		return -1;
 975	}
 976
 977	return 0;
 978}
 979
 980static void __init smp_cpu_index_default(void)
 981{
 982	int i;
 983	struct cpuinfo_x86 *c;
 984
 985	for_each_possible_cpu(i) {
 986		c = &cpu_data(i);
 987		/* mark all to hotplug */
 988		c->cpu_index = nr_cpu_ids;
 989	}
 990}
 991
 992/*
 993 * Prepare for SMP bootup.  The MP table or ACPI has been read
 994 * earlier.  Just do some sanity checking here and enable APIC mode.
 995 */
 996void __init native_smp_prepare_cpus(unsigned int max_cpus)
 997{
 998	unsigned int i;
 999
1000	preempt_disable();
1001	smp_cpu_index_default();
1002
1003	/*
1004	 * Setup boot CPU information
1005	 */
1006	smp_store_cpu_info(0); /* Final full version of the data */
1007	cpumask_copy(cpu_callin_mask, cpumask_of(0));
1008	mb();
1009
1010	current_thread_info()->cpu = 0;  /* needed? */
1011	for_each_possible_cpu(i) {
1012		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
1013		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
1014		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
1015	}
1016	set_cpu_sibling_map(0);
1017
1018
1019	if (smp_sanity_check(max_cpus) < 0) {
1020		printk(KERN_INFO "SMP disabled\n");
1021		disable_smp();
1022		goto out;
1023	}
1024
1025	default_setup_apic_routing();
1026
1027	preempt_disable();
1028	if (read_apic_id() != boot_cpu_physical_apicid) {
1029		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
1030		     read_apic_id(), boot_cpu_physical_apicid);
1031		/* Or can we switch back to PIC here? */
1032	}
1033	preempt_enable();
1034
1035	connect_bsp_APIC();
1036
1037	/*
1038	 * Switch from PIC to APIC mode.
1039	 */
1040	setup_local_APIC();
1041
1042	/*
1043	 * Enable IO APIC before setting up error vector
1044	 */
1045	if (!skip_ioapic_setup && nr_ioapics)
1046		enable_IO_APIC();
1047
1048	bsp_end_local_APIC_setup();
1049
1050	if (apic->setup_portio_remap)
1051		apic->setup_portio_remap();
1052
1053	smpboot_setup_io_apic();
1054	/*
1055	 * Set up local APIC timer on boot CPU.
1056	 */
1057
1058	printk(KERN_INFO "CPU%d: ", 0);
1059	print_cpu_info(&cpu_data(0));
1060	x86_init.timers.setup_percpu_clockev();
1061
1062	if (is_uv_system())
1063		uv_system_init();
1064
1065	set_mtrr_aps_delayed_init();
1066out:
1067	preempt_enable();
1068}
1069
1070void arch_disable_nonboot_cpus_begin(void)
1071{
1072	/*
1073	 * Avoid the smp alternatives switch during the disable_nonboot_cpus().
 1074	 * In the suspend path, we will be back in SMP mode shortly anyway.
1075	 */
1076	skip_smp_alternatives = true;
1077}
1078
1079void arch_disable_nonboot_cpus_end(void)
1080{
1081	skip_smp_alternatives = false;
1082}
1083
1084void arch_enable_nonboot_cpus_begin(void)
1085{
1086	set_mtrr_aps_delayed_init();
1087}
1088
1089void arch_enable_nonboot_cpus_end(void)
1090{
1091	mtrr_aps_init();
1092}
1093
1094/*
1095 * Early setup to make printk work.
1096 */
1097void __init native_smp_prepare_boot_cpu(void)
1098{
1099	int me = smp_processor_id();
1100	switch_to_new_gdt(me);
1101	/* already set me in cpu_online_mask in boot_cpu_init() */
1102	cpumask_set_cpu(me, cpu_callout_mask);
1103	per_cpu(cpu_state, me) = CPU_ONLINE;
1104}
1105
1106void __init native_smp_cpus_done(unsigned int max_cpus)
1107{
1108	pr_debug("Boot done.\n");
1109
1110	nmi_selftest();
1111	impress_friends();
1112#ifdef CONFIG_X86_IO_APIC
1113	setup_ioapic_dest();
1114#endif
1115	mtrr_aps_init();
1116}
1117
1118static int __initdata setup_possible_cpus = -1;
1119static int __init _setup_possible_cpus(char *str)
1120{
1121	get_option(&str, &setup_possible_cpus);
1122	return 0;
1123}
1124early_param("possible_cpus", _setup_possible_cpus);
1125
1126
1127/*
 1128 * cpu_possible_mask should be static: it cannot change as cpus
 1129 * are onlined or offlined. The reason is that per-cpu data structures
 1130 * are allocated by some modules at init time, and these don't expect
 1131 * to grow or shrink dynamically on cpu arrival/departure.
 1132 * cpu_present_mask, on the other hand, can change dynamically.
 1133 * When cpu hotplug is not compiled in, we fall back to the current
 1134 * behaviour, which is cpu_possible == cpu_present.
1135 * - Ashok Raj
1136 *
1137 * Three ways to find out the number of additional hotplug CPUs:
1138 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
1139 * - The user can overwrite it with possible_cpus=NUM
1140 * - Otherwise don't reserve additional CPUs.
1141 * We do this because additional CPUs waste a lot of memory.
1142 * -AK
1143 */
1144__init void prefill_possible_map(void)
1145{
1146	int i, possible;
1147
1148	/* no processor from mptable or madt */
1149	if (!num_processors)
1150		num_processors = 1;
1151
1152	i = setup_max_cpus ?: 1;
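     	/* GCC extension: "a ?: b" means "a ? a : b", so i is at least 1 */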
1153	if (setup_possible_cpus == -1) {
1154		possible = num_processors;
1155#ifdef CONFIG_HOTPLUG_CPU
1156		if (setup_max_cpus)
1157			possible += disabled_cpus;
1158#else
1159		if (possible > i)
1160			possible = i;
1161#endif
1162	} else
1163		possible = setup_possible_cpus;
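     	/*
     	 * Hypothetical example: firmware tables listing 4 usable and 2
     	 * disabled CPUs give possible = 6 with CPU hotplug enabled (and
     	 * no possible_cpus= override).
     	 */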
1164
1165	total_cpus = max_t(int, possible, num_processors + disabled_cpus);
1166
1167	/* nr_cpu_ids could be reduced via nr_cpus= */
1168	if (possible > nr_cpu_ids) {
1169		printk(KERN_WARNING
1170			"%d Processors exceeds NR_CPUS limit of %d\n",
1171			possible, nr_cpu_ids);
1172		possible = nr_cpu_ids;
1173	}
1174
1175#ifdef CONFIG_HOTPLUG_CPU
1176	if (!setup_max_cpus)
1177#endif
1178	if (possible > i) {
1179		printk(KERN_WARNING
1180			"%d Processors exceeds max_cpus limit of %u\n",
1181			possible, setup_max_cpus);
1182		possible = i;
1183	}
1184
1185	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
1186		possible, max_t(int, possible - num_processors, 0));
1187
1188	for (i = 0; i < possible; i++)
1189		set_cpu_possible(i, true);
1190	for (; i < NR_CPUS; i++)
1191		set_cpu_possible(i, false);
1192
1193	nr_cpu_ids = possible;
1194}
1195
1196#ifdef CONFIG_HOTPLUG_CPU
1197
1198static void remove_siblinginfo(int cpu)
1199{
1200	int sibling;
1201	struct cpuinfo_x86 *c = &cpu_data(cpu);
1202
1203	for_each_cpu(sibling, cpu_core_mask(cpu)) {
1204		cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
 1205		/*
1206		 * last thread sibling in this cpu core going down
1207		 */
1208		if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
1209			cpu_data(sibling).booted_cores--;
1210	}
1211
1212	for_each_cpu(sibling, cpu_sibling_mask(cpu))
1213		cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
1214	cpumask_clear(cpu_sibling_mask(cpu));
1215	cpumask_clear(cpu_core_mask(cpu));
1216	c->phys_proc_id = 0;
1217	c->cpu_core_id = 0;
1218	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
1219}
1220
1221static void __ref remove_cpu_from_maps(int cpu)
1222{
1223	set_cpu_online(cpu, false);
1224	cpumask_clear_cpu(cpu, cpu_callout_mask);
1225	cpumask_clear_cpu(cpu, cpu_callin_mask);
1226	/* was set by cpu_init() */
1227	cpumask_clear_cpu(cpu, cpu_initialized_mask);
1228	numa_remove_cpu(cpu);
1229}
1230
1231void cpu_disable_common(void)
1232{
1233	int cpu = smp_processor_id();
1234
1235	remove_siblinginfo(cpu);
1236
1237	/* It's now safe to remove this processor from the online map */
1238	lock_vector_lock();
1239	remove_cpu_from_maps(cpu);
1240	unlock_vector_lock();
1241	fixup_irqs();
1242}
1243
1244int native_cpu_disable(void)
1245{
1246	int cpu = smp_processor_id();
1247
1248	/*
1249	 * Perhaps use cpufreq to drop frequency, but that could go
1250	 * into generic code.
1251	 *
 1252	 * We won't take down the boot processor on i386 because some
 1253	 * interrupts can only be serviced by the BSP.
1254	 * Especially so if we're not using an IOAPIC	-zwane
1255	 */
1256	if (cpu == 0)
1257		return -EBUSY;
1258
1259	clear_local_APIC();
1260
1261	cpu_disable_common();
1262	return 0;
1263}
1264
1265void native_cpu_die(unsigned int cpu)
1266{
1267	/* We don't do anything here: idle task is faking death itself. */
1268	unsigned int i;
1269
1270	for (i = 0; i < 10; i++) {
1271		/* They ack this in play_dead by setting CPU_DEAD */
1272		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
1273			if (system_state == SYSTEM_RUNNING)
1274				pr_info("CPU %u is now offline\n", cpu);
1275
1276			if (1 == num_online_cpus())
1277				alternatives_smp_switch(0);
1278			return;
1279		}
1280		msleep(100);
1281	}
1282	pr_err("CPU %u didn't die...\n", cpu);
1283}
1284
1285void play_dead_common(void)
1286{
1287	idle_task_exit();
1288	reset_lazy_tlbstate();
1289	amd_e400_remove_cpu(raw_smp_processor_id());
1290
1291	mb();
1292	/* Ack it */
1293	__this_cpu_write(cpu_state, CPU_DEAD);
1294
1295	/*
1296	 * With physical CPU hotplug, we should halt the cpu
1297	 */
1298	local_irq_disable();
1299}
1300
1301/*
1302 * We need to flush the caches before going to sleep, lest we have
1303 * dirty data in our caches when we come back up.
1304 */
1305static inline void mwait_play_dead(void)
1306{
1307	unsigned int eax, ebx, ecx, edx;
1308	unsigned int highest_cstate = 0;
1309	unsigned int highest_subcstate = 0;
1310	int i;
1311	void *mwait_ptr;
1312	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
1313
1314	if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
1315		return;
1316	if (!this_cpu_has(X86_FEATURE_CLFLSH))
1317		return;
1318	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
1319		return;
1320
1321	eax = CPUID_MWAIT_LEAF;
1322	ecx = 0;
1323	native_cpuid(&eax, &ebx, &ecx, &edx);
1324
1325	/*
1326	 * eax will be 0 if EDX enumeration is not valid.
 1327	 * Initialized below to the (cstate, sub_cstate) value when EDX is valid.
1328	 */
1329	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
1330		eax = 0;
1331	} else {
1332		edx >>= MWAIT_SUBSTATE_SIZE;
1333		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
1334			if (edx & MWAIT_SUBSTATE_MASK) {
1335				highest_cstate = i;
1336				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
1337			}
1338		}
1339		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
1340			(highest_subcstate - 1);
1341	}
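     	/*
     	 * eax is now the MWAIT hint: deepest C-state in bits 7:4,
     	 * deepest sub-state (0-based) in bits 3:0.
     	 */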
1342
1343	/*
1344	 * This should be a memory location in a cache line which is
1345	 * unlikely to be touched by other processors.  The actual
1346	 * content is immaterial as it is not actually modified in any way.
1347	 */
1348	mwait_ptr = &current_thread_info()->flags;
1349
1350	wbinvd();
1351
1352	while (1) {
1353		/*
1354		 * The CLFLUSH is a workaround for erratum AAI65 for
1355		 * the Xeon 7400 series.  It's not clear it is actually
1356		 * needed, but it should be harmless in either case.
1357		 * The WBINVD is insufficient due to the spurious-wakeup
1358		 * case where we return around the loop.
1359		 */
1360		clflush(mwait_ptr);
1361		__monitor(mwait_ptr, 0, 0);
1362		mb();
1363		__mwait(eax, 0);
1364	}
1365}
1366
1367static inline void hlt_play_dead(void)
1368{
1369	if (__this_cpu_read(cpu_info.x86) >= 4)
1370		wbinvd();
1371
1372	while (1) {
1373		native_halt();
1374	}
1375}
1376
1377void native_play_dead(void)
1378{
1379	play_dead_common();
1380	tboot_shutdown(TB_SHUTDOWN_WFS);
1381
1382	mwait_play_dead();	/* Only returns on failure */
1383	if (cpuidle_play_dead())
1384		hlt_play_dead();
1385}
1386
1387#else /* ... !CONFIG_HOTPLUG_CPU */
1388int native_cpu_disable(void)
1389{
1390	return -ENOSYS;
1391}
1392
1393void native_cpu_die(unsigned int cpu)
1394{
1395	/* We said "no" in __cpu_disable */
1396	BUG();
1397}
1398
1399void native_play_dead(void)
1400{
1401	BUG();
1402}
1403
1404#endif
v3.1
   1/*
   2 *	x86 SMP booting functions
   3 *
   4 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
   5 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
   6 *	Copyright 2001 Andi Kleen, SuSE Labs.
   7 *
   8 *	Much of the core SMP work is based on previous work by Thomas Radke, to
   9 *	whom a great many thanks are extended.
  10 *
  11 *	Thanks to Intel for making available several different Pentium,
  12 *	Pentium Pro and Pentium-II/Xeon MP machines.
  13 *	Original development of Linux SMP code supported by Caldera.
  14 *
  15 *	This code is released under the GNU General Public License version 2 or
  16 *	later.
  17 *
  18 *	Fixes
  19 *		Felix Koop	:	NR_CPUS used properly
  20 *		Jose Renau	:	Handle single CPU case.
  21 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
  22 *		Greg Wright	:	Fix for kernel stacks panic.
  23 *		Erich Boleyn	:	MP v1.4 and additional changes.
  24 *	Matthias Sattler	:	Changes for 2.1 kernel map.
  25 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
  26 *	Michael Chastain	:	Change trampoline.S to gnu as.
  27 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
  28 *		Ingo Molnar	:	Added APIC timers, based on code
  29 *					from Jose Renau
  30 *		Ingo Molnar	:	various cleanups and rewrites
  31 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
  32 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
  33 *	Andi Kleen		:	Changed for SMP boot into long mode.
  34 *		Martin J. Bligh	: 	Added support for multi-quad systems
  35 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
  36 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
  37 *      Andi Kleen              :       Converted to new state machine.
  38 *	Ashok Raj		: 	CPU hotplug support
  39 *	Glauber Costa		:	i386 and x86_64 integration
  40 */
  41
  42#include <linux/init.h>
  43#include <linux/smp.h>
  44#include <linux/module.h>
  45#include <linux/sched.h>
  46#include <linux/percpu.h>
  47#include <linux/bootmem.h>
  48#include <linux/err.h>
  49#include <linux/nmi.h>
  50#include <linux/tboot.h>
  51#include <linux/stackprotector.h>
  52#include <linux/gfp.h>
 
  53
  54#include <asm/acpi.h>
  55#include <asm/desc.h>
  56#include <asm/nmi.h>
  57#include <asm/irq.h>
  58#include <asm/idle.h>
  59#include <asm/trampoline.h>
  60#include <asm/cpu.h>
  61#include <asm/numa.h>
  62#include <asm/pgtable.h>
  63#include <asm/tlbflush.h>
  64#include <asm/mtrr.h>
  65#include <asm/mwait.h>
  66#include <asm/apic.h>
  67#include <asm/io_apic.h>
  68#include <asm/setup.h>
  69#include <asm/uv/uv.h>
  70#include <linux/mc146818rtc.h>
  71
  72#include <asm/smpboot_hooks.h>
  73#include <asm/i8259.h>
  74
 
 
  75/* State of each CPU */
  76DEFINE_PER_CPU(int, cpu_state) = { 0 };
  77
  78/* Store all idle threads, this can be reused instead of creating
  79* a new thread. Also avoids complicated thread destroy functionality
  80* for idle threads.
  81*/
  82#ifdef CONFIG_HOTPLUG_CPU
  83/*
  84 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
  85 * removed after init for !CONFIG_HOTPLUG_CPU.
  86 */
  87static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
  88#define get_idle_for_cpu(x)      (per_cpu(idle_thread_array, x))
  89#define set_idle_for_cpu(x, p)   (per_cpu(idle_thread_array, x) = (p))
  90
  91/*
  92 * We need this for trampoline_base protection from concurrent accesses when
  93 * off- and onlining cores wildly.
  94 */
  95static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
  96
  97void cpu_hotplug_driver_lock(void)
  98{
  99        mutex_lock(&x86_cpu_hotplug_driver_mutex);
 100}
 101
 102void cpu_hotplug_driver_unlock(void)
 103{
 104        mutex_unlock(&x86_cpu_hotplug_driver_mutex);
 105}
 106
 107ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
 108ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 109#else
 110static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 111#define get_idle_for_cpu(x)      (idle_thread_array[(x)])
 112#define set_idle_for_cpu(x, p)   (idle_thread_array[(x)] = (p))
 113#endif
 114
 115/* Number of siblings per CPU package */
 116int smp_num_siblings = 1;
 117EXPORT_SYMBOL(smp_num_siblings);
 118
 119/* Last level cache ID of each logical CPU */
 120DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 121
 122/* representing HT siblings of each logical CPU */
 123DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
 124EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 125
 126/* representing HT and core siblings of each logical CPU */
 127DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 128EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 129
 130DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
 131
 132/* Per CPU bogomips and other parameters */
 133DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 134EXPORT_PER_CPU_SYMBOL(cpu_info);
 135
 136atomic_t init_deasserted;
 137
 138/*
 139 * Report back to the Boot Processor.
 140 * Running on AP.
 141 */
 142static void __cpuinit smp_callin(void)
 143{
 144	int cpuid, phys_id;
 145	unsigned long timeout;
 146
 147	/*
 148	 * If waken up by an INIT in an 82489DX configuration
 149	 * we may get here before an INIT-deassert IPI reaches
 150	 * our local APIC.  We have to wait for the IPI or we'll
 151	 * lock up on an APIC access.
 152	 */
 153	if (apic->wait_for_init_deassert)
 154		apic->wait_for_init_deassert(&init_deasserted);
 155
 156	/*
 157	 * (This works even if the APIC is not enabled.)
 158	 */
 159	phys_id = read_apic_id();
 160	cpuid = smp_processor_id();
 161	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
 162		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
 163					phys_id, cpuid);
 164	}
 165	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
 166
 167	/*
 168	 * STARTUP IPIs are fragile beasts as they might sometimes
 169	 * trigger some glue motherboard logic. Complete APIC bus
 170	 * silence for 1 second, this overestimates the time the
 171	 * boot CPU is spending to send the up to 2 STARTUP IPIs
 172	 * by a factor of two. This should be enough.
 173	 */
 174
 175	/*
 176	 * Waiting 2s total for startup (udelay is not yet working)
 177	 */
 178	timeout = jiffies + 2*HZ;
 179	while (time_before(jiffies, timeout)) {
 180		/*
 181		 * Has the boot CPU finished it's STARTUP sequence?
 182		 */
 183		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
 184			break;
 185		cpu_relax();
 186	}
 187
 188	if (!time_before(jiffies, timeout)) {
 189		panic("%s: CPU%d started up but did not get a callout!\n",
 190		      __func__, cpuid);
 191	}
 192
 193	/*
 194	 * the boot CPU has finished the init stage and is spinning
 195	 * on callin_map until we finish. We are free to set up this
 196	 * CPU, first the APIC. (this is probably redundant on most
 197	 * boards)
 198	 */
 199
 200	pr_debug("CALLIN, before setup_local_APIC().\n");
 201	if (apic->smp_callin_clear_local_apic)
 202		apic->smp_callin_clear_local_apic();
 203	setup_local_APIC();
 204	end_local_APIC_setup();
 205
 206	/*
 207	 * Need to setup vector mappings before we enable interrupts.
 208	 */
 209	setup_vector_irq(smp_processor_id());
 
 
 
 
 
 
 
 210	/*
 211	 * Get our bogomips.
 212	 *
 213	 * Need to enable IRQs because it can take longer and then
 214	 * the NMI watchdog might kill us.
 215	 */
 216	local_irq_enable();
 217	calibrate_delay();
 218	local_irq_disable();
 219	pr_debug("Stack at about %p\n", &cpuid);
 220
 221	/*
 222	 * Save our processor parameters
 223	 */
 224	smp_store_cpu_info(cpuid);
 225
 226	/*
 227	 * This must be done before setting cpu_online_mask
 228	 * or calling notify_cpu_starting.
 229	 */
 230	set_cpu_sibling_map(raw_smp_processor_id());
 231	wmb();
 232
 233	notify_cpu_starting(cpuid);
 234
 235	/*
 236	 * Allow the master to continue.
 237	 */
 238	cpumask_set_cpu(cpuid, cpu_callin_mask);
 239}
 240
 241/*
 242 * Activate a secondary processor.
 243 */
 244notrace static void __cpuinit start_secondary(void *unused)
 245{
 246	/*
 247	 * Don't put *anything* before cpu_init(), SMP booting is too
 248	 * fragile that we want to limit the things done here to the
 249	 * most necessary things.
 250	 */
 251	cpu_init();
 
 252	preempt_disable();
 253	smp_callin();
 254
 255#ifdef CONFIG_X86_32
 256	/* switch away from the initial page table */
 257	load_cr3(swapper_pg_dir);
 258	__flush_tlb_all();
 259#endif
 260
 261	/* otherwise gcc will move up smp_processor_id before the cpu_init */
 262	barrier();
 263	/*
 264	 * Check TSC synchronization with the BP:
 265	 */
 266	check_tsc_sync_target();
 267
 268	/*
 269	 * We need to hold call_lock, so there is no inconsistency
 270	 * between the time smp_call_function() determines number of
 271	 * IPI recipients, and the time when the determination is made
 272	 * for which cpus receive the IPI. Holding this
 273	 * lock helps us to not include this cpu in a currently in progress
 274	 * smp_call_function().
 275	 *
 276	 * We need to hold vector_lock so there the set of online cpus
 277	 * does not change while we are assigning vectors to cpus.  Holding
 278	 * this lock ensures we don't half assign or remove an irq from a cpu.
 279	 */
 280	ipi_call_lock();
 281	lock_vector_lock();
 282	set_cpu_online(smp_processor_id(), true);
 283	unlock_vector_lock();
 284	ipi_call_unlock();
 285	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 286	x86_platform.nmi_init();
 287
 288	/*
 289	 * Wait until the cpu which brought this one up marked it
 290	 * online before enabling interrupts. If we don't do that then
 291	 * we can end up waking up the softirq thread before this cpu
 292	 * reached the active state, which makes the scheduler unhappy
 293	 * and schedule the softirq thread on the wrong cpu. This is
 294	 * only observable with forced threaded interrupts, but in
 295	 * theory it could also happen w/o them. It's just way harder
 296	 * to achieve.
 297	 */
 298	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
 299		cpu_relax();
 300
 301	/* enable local interrupts */
 302	local_irq_enable();
 303
 304	/* to prevent fake stack check failure in clock setup */
 305	boot_init_stack_canary();
 306
 307	x86_cpuinit.setup_percpu_clockev();
 308
 309	wmb();
 310	cpu_idle();
 311}
 312
 313/*
 314 * The bootstrap kernel entry code has set these up. Save them for
 315 * a given CPU
 316 */
 317
 318void __cpuinit smp_store_cpu_info(int id)
 319{
 320	struct cpuinfo_x86 *c = &cpu_data(id);
 321
 322	*c = boot_cpu_data;
 323	c->cpu_index = id;
 324	if (id != 0)
 325		identify_secondary_cpu(c);
 326}
 327
 328static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 329{
 330	cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
 331	cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1));
 332	cpumask_set_cpu(cpu1, cpu_core_mask(cpu2));
 333	cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
 334	cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
 335	cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
 
 336}
 337
 
 
 
 
 
 
 
 
 
 
 338
 339void __cpuinit set_cpu_sibling_map(int cpu)
 340{
 
 
 
 
 341	int i;
 342	struct cpuinfo_x86 *c = &cpu_data(cpu);
 343
 344	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 345
 346	if (smp_num_siblings > 1) {
 347		for_each_cpu(i, cpu_sibling_setup_mask) {
 348			struct cpuinfo_x86 *o = &cpu_data(i);
 349
 350			if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
 351				if (c->phys_proc_id == o->phys_proc_id &&
 352				    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i) &&
 353				    c->compute_unit_id == o->compute_unit_id)
 354					link_thread_siblings(cpu, i);
 355			} else if (c->phys_proc_id == o->phys_proc_id &&
 356				   c->cpu_core_id == o->cpu_core_id) {
 357				link_thread_siblings(cpu, i);
 358			}
 359		}
 360	} else {
 361		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 
 
 
 
 362	}
 363
 364	cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 
 
 
 
 
 
 
 365
 366	if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
 367		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
 368		c->booted_cores = 1;
 369		return;
 370	}
 371
 
 
 
 
 372	for_each_cpu(i, cpu_sibling_setup_mask) {
 373		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 374		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
 375			cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
 376			cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
 377		}
 378		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 379			cpumask_set_cpu(i, cpu_core_mask(cpu));
 380			cpumask_set_cpu(cpu, cpu_core_mask(i));
 381			/*
 382			 *  Does this new cpu bringup a new core?
 383			 */
 384			if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
 385				/*
 386				 * for each core in package, increment
 387				 * the booted_cores for this new cpu
 388				 */
 389				if (cpumask_first(cpu_sibling_mask(i)) == i)
 390					c->booted_cores++;
 391				/*
 392				 * increment the core count for all
 393				 * the other cpus in this package
 394				 */
 395				if (i != cpu)
 396					cpu_data(i).booted_cores++;
 397			} else if (i != cpu && !c->booted_cores)
 398				c->booted_cores = cpu_data(i).booted_cores;
 399		}
 400	}
 401}
 402
 403/* maps the cpu to the sched domain representing multi-core */
 404const struct cpumask *cpu_coregroup_mask(int cpu)
 405{
 406	struct cpuinfo_x86 *c = &cpu_data(cpu);
 407	/*
 408	 * For perf, we return last level cache shared map.
 409	 * And for power savings, we return cpu_core_map
 410	 */
 411	if ((sched_mc_power_savings || sched_smt_power_savings) &&
 412	    !(cpu_has(c, X86_FEATURE_AMD_DCM)))
 413		return cpu_core_mask(cpu);
 414	else
 415		return cpu_llc_shared_mask(cpu);
 416}
 417
 418static void impress_friends(void)
 419{
 420	int cpu;
 421	unsigned long bogosum = 0;
 422	/*
 423	 * Allow the user to impress friends.
 424	 */
 425	pr_debug("Before bogomips.\n");
 426	for_each_possible_cpu(cpu)
 427		if (cpumask_test_cpu(cpu, cpu_callout_mask))
 428			bogosum += cpu_data(cpu).loops_per_jiffy;
 429	printk(KERN_INFO
 430		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 431		num_online_cpus(),
 432		bogosum/(500000/HZ),
 433		(bogosum/(5000/HZ))%100);
 434
 435	pr_debug("Before bogocount - setting activated=1.\n");
 436}
 437
 438void __inquire_remote_apic(int apicid)
 439{
 440	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
 441	const char * const names[] = { "ID", "VERSION", "SPIV" };
 442	int timeout;
 443	u32 status;
 444
 445	printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);
 446
 447	for (i = 0; i < ARRAY_SIZE(regs); i++) {
 448		printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);
 449
 450		/*
 451		 * Wait for idle.
 452		 */
 453		status = safe_apic_wait_icr_idle();
 454		if (status)
 455			printk(KERN_CONT
 456			       "a previous APIC delivery may have failed\n");
 457
 458		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
 459
 460		timeout = 0;
 461		do {
 462			udelay(100);
 463			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
 464		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
 465
 466		switch (status) {
 467		case APIC_ICR_RR_VALID:
 468			status = apic_read(APIC_RRR);
 469			printk(KERN_CONT "%08x\n", status);
 470			break;
 471		default:
 472			printk(KERN_CONT "failed\n");
 473		}
 474	}
 475}
 476
 477/*
 478 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 479 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 480 * won't ... remember to clear down the APIC, etc later.
 481 */
 482int __cpuinit
 483wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
 484{
 485	unsigned long send_status, accept_status = 0;
 486	int maxlvt;
 487
 488	/* Target chip */
 489	/* Boot on the stack */
 490	/* Kick the second */
 491	apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid);
 492
 493	pr_debug("Waiting for send to finish...\n");
 494	send_status = safe_apic_wait_icr_idle();
 495
 496	/*
 497	 * Give the other CPU some time to accept the IPI.
 498	 */
 499	udelay(200);
 500	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
 501		maxlvt = lapic_get_maxlvt();
 502		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
 503			apic_write(APIC_ESR, 0);
 504		accept_status = (apic_read(APIC_ESR) & 0xEF);
 505	}
 506	pr_debug("NMI sent.\n");
 507
 508	if (send_status)
 509		printk(KERN_ERR "APIC never delivered???\n");
 510	if (accept_status)
 511		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
 512
 513	return (send_status | accept_status);
 514}
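
/*
 * An illustrative sketch of how this NMI method gets used: do_boot_cpu()
 * below only calls it through the optional ->wakeup_secondary_cpu hook,
 * so an APIC driver that needs NMI wakeup is expected to wire it up
 * roughly like this (the struct name here is invented):
 */
#if 0	/* example only */
static struct apic apic_nmi_wakeup_example = {
	/* ... the driver's other callbacks ... */
	.wakeup_secondary_cpu	= wakeup_secondary_cpu_via_nmi,
};
#endif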
 515
 516static int __cpuinit
 517wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 518{
 519	unsigned long send_status, accept_status = 0;
 520	int maxlvt, num_starts, j;
 521
 522	maxlvt = lapic_get_maxlvt();
 523
 524	/*
 525	 * Be paranoid about clearing APIC errors.
 526	 */
 527	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
 528		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 529			apic_write(APIC_ESR, 0);
 530		apic_read(APIC_ESR);
 531	}
 532
 533	pr_debug("Asserting INIT.\n");
 534
 535	/*
 536	 * Turn INIT on target chip
 537	 */
 538	/*
 539	 * Send IPI
 540	 */
 541	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
 542		       phys_apicid);
 543
 544	pr_debug("Waiting for send to finish...\n");
 545	send_status = safe_apic_wait_icr_idle();
 546
 547	mdelay(10);
 548
 549	pr_debug("Deasserting INIT.\n");
 550
 551	/* Target chip */
 552	/* Send IPI */
 553	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
 554
 555	pr_debug("Waiting for send to finish...\n");
 556	send_status = safe_apic_wait_icr_idle();
 557
 558	mb();
 559	atomic_set(&init_deasserted, 1);
 560
 561	/*
 562	 * Should we send STARTUP IPIs ?
 563	 *
 564	 * Determine this based on the APIC version.
 565	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
 566	 */
 567	if (APIC_INTEGRATED(apic_version[phys_apicid]))
 568		num_starts = 2;
 569	else
 570		num_starts = 0;
 571
 572	/*
 573	 * Paravirt / VMI wants a startup IPI hook here to set up the
 574	 * target processor state.
 575	 */
 576	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
 577			 stack_start);
 578
 579	/*
 580	 * Run STARTUP IPI loop.
 581	 */
 582	pr_debug("#startup loops: %d.\n", num_starts);
 583
 584	for (j = 1; j <= num_starts; j++) {
 585		pr_debug("Sending STARTUP #%d.\n", j);
 586		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 587			apic_write(APIC_ESR, 0);
 588		apic_read(APIC_ESR);
 589		pr_debug("After apic_write.\n");
 590
 591		/*
 592		 * STARTUP IPI
 593		 */
 594
 595		/* Target chip */
 596		/* Boot on the stack */
 597		/* Kick the second */
 598		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
 599			       phys_apicid);
 600
 601		/*
 602		 * Give the other CPU some time to accept the IPI.
 603		 */
 604		udelay(300);
 605
 606		pr_debug("Startup point 1.\n");
 607
 608		pr_debug("Waiting for send to finish...\n");
 609		send_status = safe_apic_wait_icr_idle();
 610
 611		/*
 612		 * Give the other CPU some time to accept the IPI.
 613		 */
 614		udelay(200);
 615		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 616			apic_write(APIC_ESR, 0);
 617		accept_status = (apic_read(APIC_ESR) & 0xEF);
 618		if (send_status || accept_status)
 619			break;
 620	}
 621	pr_debug("After Startup.\n");
 622
 623	if (send_status)
 624		printk(KERN_ERR "APIC never delivered???\n");
 625	if (accept_status)
 626		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
 627
 628	return (send_status | accept_status);
 629}
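
/*
 * A worked example of the STARTUP vector encoding above (the address is
 * illustrative).  The 8-bit vector carried by a STARTUP IPI is the
 * physical page number of the real-mode entry point, which is why
 * start_eip must be 4 KiB aligned and below 1 MiB.  A trampoline copied
 * to physical address 0x96000 would be kicked with vector 0x96:
 *
 *	apic_icr_write(APIC_DM_STARTUP | (0x96000 >> 12), phys_apicid);
 */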
 630
 631struct create_idle {
 632	struct work_struct work;
 633	struct task_struct *idle;
 634	struct completion done;
 635	int cpu;
 636};
 637
 638static void __cpuinit do_fork_idle(struct work_struct *work)
 639{
 640	struct create_idle *c_idle =
 641		container_of(work, struct create_idle, work);
 642
 643	c_idle->idle = fork_idle(c_idle->cpu);
 644	complete(&c_idle->done);
 645}
 646
 647/* reduce the number of lines printed when booting a large cpu count system */
 648static void __cpuinit announce_cpu(int cpu, int apicid)
 649{
 650	static int current_node = -1;
 651	int node = early_cpu_to_node(cpu);
 652
 653	if (system_state == SYSTEM_BOOTING) {
 654		if (node != current_node) {
  655			if (current_node > -1)
 656				pr_cont(" Ok.\n");
 657			current_node = node;
 658			pr_info("Booting Node %3d, Processors ", node);
 659		}
 660		pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
 661		return;
 662	} else
 663		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
 664			node, cpu, apicid);
 665}
 666
 667/*
  668 * NOTE - on most systems this is a PHYSICAL APIC ID, but on multiquad
  669 * (i.e. clustered APIC addressing mode), this is a LOGICAL APIC ID.
 670 * Returns zero if CPU booted OK, else error code from
 671 * ->wakeup_secondary_cpu.
 672 */
 673static int __cpuinit do_boot_cpu(int apicid, int cpu)
 674{
 
 
 
 
 
 675	unsigned long boot_error = 0;
 676	unsigned long start_ip;
 677	int timeout;
 678	struct create_idle c_idle = {
 679		.cpu	= cpu,
 680		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 681	};
 682
 683	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
 684
 685	alternatives_smp_switch(1);
 686
 687	c_idle.idle = get_idle_for_cpu(cpu);
 688
 689	/*
  690	 * We can't use kernel_thread() since we must avoid
  691	 * rescheduling the child.
 692	 */
 693	if (c_idle.idle) {
 694		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
 695			(THREAD_SIZE +  task_stack_page(c_idle.idle))) - 1);
 696		init_idle(c_idle.idle, cpu);
 697		goto do_rest;
 698	}
 699
 700	schedule_work(&c_idle.work);
 701	wait_for_completion(&c_idle.done);
 702
 703	if (IS_ERR(c_idle.idle)) {
  704		printk(KERN_ERR "failed fork for CPU %d\n", cpu);
 705		destroy_work_on_stack(&c_idle.work);
 706		return PTR_ERR(c_idle.idle);
 707	}
 708
 709	set_idle_for_cpu(cpu, c_idle.idle);
 710do_rest:
 711	per_cpu(current_task, cpu) = c_idle.idle;
 712#ifdef CONFIG_X86_32
 713	/* Stack for startup_32 can be just as for start_secondary onwards */
 714	irq_ctx_init(cpu);
 715#else
 716	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
 717	initial_gs = per_cpu_offset(cpu);
 718	per_cpu(kernel_stack, cpu) =
 719		(unsigned long)task_stack_page(c_idle.idle) -
 720		KERNEL_STACK_OFFSET + THREAD_SIZE;
 721#endif
 722	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 723	initial_code = (unsigned long)start_secondary;
 724	stack_start  = c_idle.idle->thread.sp;
 725
 726	/* start_ip had better be page-aligned! */
 727	start_ip = trampoline_address();
 728
 729	/* So we see what's up */
 730	announce_cpu(cpu, apicid);
 731
 732	/*
 733	 * This grunge runs the startup process for
 734	 * the targeted processor.
 735	 */
 736
 737	printk(KERN_DEBUG "smpboot cpu %d: start_ip = %lx\n", cpu, start_ip);
 738
 739	atomic_set(&init_deasserted, 0);
 740
 741	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
 742
 743		pr_debug("Setting warm reset code and vector.\n");
 744
 745		smpboot_setup_warm_reset_vector(start_ip);
 746		/*
 747		 * Be paranoid about clearing APIC errors.
  748		 */
 749		if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
 750			apic_write(APIC_ESR, 0);
 751			apic_read(APIC_ESR);
 752		}
 753	}
 754
 755	/*
 756	 * Kick the secondary CPU. Use the method in the APIC driver
 757	 * if it's defined - or use an INIT boot APIC message otherwise:
 758	 */
 759	if (apic->wakeup_secondary_cpu)
 760		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
 761	else
 762		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
 763
 764	if (!boot_error) {
 765		/*
  766		 * Allow APs to start initializing.
 767		 */
 768		pr_debug("Before Callout %d.\n", cpu);
 769		cpumask_set_cpu(cpu, cpu_callout_mask);
 770		pr_debug("After Callout %d.\n", cpu);
 771
 772		/*
 773		 * Wait 5s total for a response
 774		 */
 775		for (timeout = 0; timeout < 50000; timeout++) {
 776			if (cpumask_test_cpu(cpu, cpu_callin_mask))
 777				break;	/* It has booted */
 778			udelay(100);
 779			/*
 780			 * Allow other tasks to run while we wait for the
 781			 * AP to come online. This also gives a chance
  782			 * for the MTRR work (triggered by the AP coming online)
 783			 * to be completed in the stop machine context.
 784			 */
 785			schedule();
 786		}
 787
  788		if (cpumask_test_cpu(cpu, cpu_callin_mask))
  789			pr_debug("CPU%d: has booted.\n", cpu);
 790		else {
 791			boot_error = 1;
 792			if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status)
 793			    == 0xA5A5A5A5)
 794				/* trampoline started but...? */
 795				pr_err("CPU%d: Stuck ??\n", cpu);
 796			else
 797				/* trampoline code not run */
 798				pr_err("CPU%d: Not responding.\n", cpu);
 799			if (apic->inquire_remote_apic)
 800				apic->inquire_remote_apic(apicid);
 801		}
 802	}
 803
 804	if (boot_error) {
 805		/* Try to put things back the way they were before ... */
 806		numa_remove_cpu(cpu); /* was set by numa_add_cpu */
 807
 808		/* was set by do_boot_cpu() */
 809		cpumask_clear_cpu(cpu, cpu_callout_mask);
 810
 811		/* was set by cpu_init() */
 812		cpumask_clear_cpu(cpu, cpu_initialized_mask);
 813
 814		set_cpu_present(cpu, false);
 815		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
 816	}
 817
 818	/* mark "stuck" area as not stuck */
 819	*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) = 0;
 820
 821	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
 822		/*
 823		 * Cleanup possible dangling ends...
 824		 */
 825		smpboot_restore_warm_reset_vector();
 826	}
 827
 828	destroy_work_on_stack(&c_idle.work);
 829	return boot_error;
 830}
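
/*
 * A hedged sketch of the AP side of the callout/callin handshake above;
 * the real code is smp_callin(), run from start_secondary() earlier in
 * this file.  In rough outline:
 */
#if 0	/* example only */
	/* AP: wait until the BSP has marked us in cpu_callout_mask ... */
	while (!cpumask_test_cpu(smp_processor_id(), cpu_callout_mask))
		cpu_relax();
	/*
	 * ... initialize the local APIC, calibrate the delay loop, then
	 * report in so the BSP's wait on cpu_callin_mask terminates:
	 */
	cpumask_set_cpu(smp_processor_id(), cpu_callin_mask);
#endif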
 831
 832int __cpuinit native_cpu_up(unsigned int cpu)
 833{
 834	int apicid = apic->cpu_present_to_apicid(cpu);
 835	unsigned long flags;
 836	int err;
 837
 838	WARN_ON(irqs_disabled());
 839
 840	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
 841
  842	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
  843	    !physid_isset(apicid, phys_cpu_present_map)) {
  844		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
 845		return -EINVAL;
 846	}
 847
 848	/*
 849	 * Already booted CPU?
 850	 */
 851	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
 852		pr_debug("do_boot_cpu %d Already started\n", cpu);
 853		return -ENOSYS;
 854	}
 855
 856	/*
 857	 * Save current MTRR state in case it was changed since early boot
 858	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
 859	 */
 860	mtrr_save_state();
 861
 862	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 863
 864	err = do_boot_cpu(apicid, cpu);
 865	if (err) {
 866		pr_debug("do_boot_cpu failed %d\n", err);
 867		return -EIO;
 868	}
 869
 870	/*
 871	 * Check TSC synchronization with the AP (keep irqs disabled
 872	 * while doing so):
 873	 */
 874	local_irq_save(flags);
 875	check_tsc_sync_source(cpu);
 876	local_irq_restore(flags);
 877
 878	while (!cpu_online(cpu)) {
 879		cpu_relax();
 880		touch_nmi_watchdog();
 881	}
 882
 883	return 0;
 884}
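
/*
 * For orientation (hedged, based on the usual smp_ops wiring rather than
 * on this file): native_cpu_up() is reached through the generic hotplug
 * path, typically when userspace onlines a CPU via sysfs:
 *
 *	# echo 1 > /sys/devices/system/cpu/cpu2/online
 *
 * which goes cpu_up() -> _cpu_up() -> __cpu_up(), the latter calling
 * smp_ops.cpu_up, which x86 points at native_cpu_up.
 */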
 885
 886/**
 887 * arch_disable_smp_support() - disables SMP support for x86 at runtime
 888 */
 889void arch_disable_smp_support(void)
 890{
 891	disable_ioapic_support();
 892}
 893
 894/*
 895 * Fall back to non SMP mode after errors.
 896 *
 897 * RED-PEN audit/test this more. I bet there is more state messed up here.
 898 */
 899static __init void disable_smp(void)
 900{
 901	init_cpu_present(cpumask_of(0));
 902	init_cpu_possible(cpumask_of(0));
 903	smpboot_clear_io_apic_irqs();
 904
 905	if (smp_found_config)
 906		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
 907	else
 908		physid_set_mask_of_physid(0, &phys_cpu_present_map);
 909	cpumask_set_cpu(0, cpu_sibling_mask(0));
 910	cpumask_set_cpu(0, cpu_core_mask(0));
 911}
 912
 913/*
 914 * Various sanity checks.
 915 */
 916static int __init smp_sanity_check(unsigned max_cpus)
 917{
 918	preempt_disable();
 919
 920#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
 921	if (def_to_bigsmp && nr_cpu_ids > 8) {
 922		unsigned int cpu;
 923		unsigned nr;
 924
 925		printk(KERN_WARNING
 926		       "More than 8 CPUs detected - skipping them.\n"
 927		       "Use CONFIG_X86_BIGSMP.\n");
 928
 929		nr = 0;
 930		for_each_present_cpu(cpu) {
 931			if (nr >= 8)
 932				set_cpu_present(cpu, false);
 933			nr++;
 934		}
 935
 936		nr = 0;
 937		for_each_possible_cpu(cpu) {
 938			if (nr >= 8)
 939				set_cpu_possible(cpu, false);
 940			nr++;
 941		}
 942
 943		nr_cpu_ids = 8;
 944	}
 945#endif
 946
 947	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
 948		printk(KERN_WARNING
 949			"weird, boot CPU (#%d) not listed by the BIOS.\n",
 950			hard_smp_processor_id());
 951
 952		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
 953	}
 954
 955	/*
 956	 * If we couldn't find an SMP configuration at boot time,
 957	 * get out of here now!
 958	 */
 959	if (!smp_found_config && !acpi_lapic) {
 960		preempt_enable();
 961		printk(KERN_NOTICE "SMP motherboard not detected.\n");
 962		disable_smp();
 963		if (APIC_init_uniprocessor())
 964			printk(KERN_NOTICE "Local APIC not detected."
 965					   " Using dummy APIC emulation.\n");
 966		return -1;
 967	}
 968
 969	/*
 970	 * Should not be necessary because the MP table should list the boot
 971	 * CPU too, but we do it for the sake of robustness anyway.
 972	 */
 973	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
 974		printk(KERN_NOTICE
 975			"weird, boot CPU (#%d) not listed by the BIOS.\n",
 976			boot_cpu_physical_apicid);
 977		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
 978	}
 979	preempt_enable();
 980
 981	/*
 982	 * If we couldn't find a local APIC, then get out of here now!
 983	 */
 984	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
 985	    !cpu_has_apic) {
 986		if (!disable_apic) {
 987			pr_err("BIOS bug, local APIC #%d not detected!...\n",
 988				boot_cpu_physical_apicid);
  989			pr_err("... forcing use of dummy APIC emulation. "
  990				"(tell your hw vendor)\n");
 991		}
 992		smpboot_clear_io_apic();
 993		disable_ioapic_support();
 994		return -1;
 995	}
 996
 997	verify_local_APIC();
 998
 999	/*
1000	 * If SMP should be disabled, then really disable it!
1001	 */
1002	if (!max_cpus) {
1003		printk(KERN_INFO "SMP mode deactivated.\n");
1004		smpboot_clear_io_apic();
1005
1006		connect_bsp_APIC();
1007		setup_local_APIC();
1008		bsp_end_local_APIC_setup();
1009		return -1;
1010	}
1011
1012	return 0;
1013}
1014
1015static void __init smp_cpu_index_default(void)
1016{
1017	int i;
1018	struct cpuinfo_x86 *c;
1019
1020	for_each_possible_cpu(i) {
1021		c = &cpu_data(i);
 1022		/* mark all cpu_index fields as unset (== nr_cpu_ids) */
1023		c->cpu_index = nr_cpu_ids;
1024	}
1025}
1026
1027/*
1028 * Prepare for SMP bootup.  The MP table or ACPI has been read
1029 * earlier.  Just do some sanity checking here and enable APIC mode.
1030 */
1031void __init native_smp_prepare_cpus(unsigned int max_cpus)
1032{
1033	unsigned int i;
1034
1035	preempt_disable();
1036	smp_cpu_index_default();
1037
1038	/*
1039	 * Setup boot CPU information
1040	 */
1041	smp_store_cpu_info(0); /* Final full version of the data */
1042	cpumask_copy(cpu_callin_mask, cpumask_of(0));
1043	mb();
1044
1045	current_thread_info()->cpu = 0;  /* needed? */
1046	for_each_possible_cpu(i) {
1047		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
1048		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
1049		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
1050	}
1051	set_cpu_sibling_map(0);
1052
1053
1054	if (smp_sanity_check(max_cpus) < 0) {
1055		printk(KERN_INFO "SMP disabled\n");
1056		disable_smp();
1057		goto out;
1058	}
1059
1060	default_setup_apic_routing();
1061
1062	preempt_disable();
1063	if (read_apic_id() != boot_cpu_physical_apicid) {
1064		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
1065		     read_apic_id(), boot_cpu_physical_apicid);
1066		/* Or can we switch back to PIC here? */
1067	}
1068	preempt_enable();
1069
1070	connect_bsp_APIC();
1071
1072	/*
1073	 * Switch from PIC to APIC mode.
1074	 */
1075	setup_local_APIC();
1076
1077	/*
1078	 * Enable IO APIC before setting up error vector
1079	 */
1080	if (!skip_ioapic_setup && nr_ioapics)
1081		enable_IO_APIC();
1082
1083	bsp_end_local_APIC_setup();
1084
1085	if (apic->setup_portio_remap)
1086		apic->setup_portio_remap();
1087
1088	smpboot_setup_io_apic();
1089	/*
1090	 * Set up local APIC timer on boot CPU.
1091	 */
1092
1093	printk(KERN_INFO "CPU%d: ", 0);
1094	print_cpu_info(&cpu_data(0));
1095	x86_init.timers.setup_percpu_clockev();
1096
1097	if (is_uv_system())
1098		uv_system_init();
1099
1100	set_mtrr_aps_delayed_init();
1101out:
1102	preempt_enable();
1103}
1104
1105void arch_disable_nonboot_cpus_begin(void)
1106{
1107	/*
 1108	 * Avoid the SMP alternatives switch during disable_nonboot_cpus().
 1109	 * In the suspend path, we will be back in SMP mode shortly anyway.
1110	 */
1111	skip_smp_alternatives = true;
1112}
1113
1114void arch_disable_nonboot_cpus_end(void)
1115{
1116	skip_smp_alternatives = false;
1117}
1118
1119void arch_enable_nonboot_cpus_begin(void)
1120{
1121	set_mtrr_aps_delayed_init();
1122}
1123
1124void arch_enable_nonboot_cpus_end(void)
1125{
1126	mtrr_aps_init();
1127}
1128
1129/*
1130 * Early setup to make printk work.
1131 */
1132void __init native_smp_prepare_boot_cpu(void)
1133{
1134	int me = smp_processor_id();
1135	switch_to_new_gdt(me);
1136	/* already set me in cpu_online_mask in boot_cpu_init() */
1137	cpumask_set_cpu(me, cpu_callout_mask);
1138	per_cpu(cpu_state, me) = CPU_ONLINE;
1139}
1140
1141void __init native_smp_cpus_done(unsigned int max_cpus)
1142{
1143	pr_debug("Boot done.\n");
 1144
 1145	impress_friends();
1146#ifdef CONFIG_X86_IO_APIC
1147	setup_ioapic_dest();
1148#endif
1149	mtrr_aps_init();
1150}
1151
1152static int __initdata setup_possible_cpus = -1;
1153static int __init _setup_possible_cpus(char *str)
1154{
1155	get_option(&str, &setup_possible_cpus);
1156	return 0;
1157}
1158early_param("possible_cpus", _setup_possible_cpus);
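
/*
 * An illustrative use of the early_param above: booting with
 *
 *	possible_cpus=8
 *
 * makes prefill_possible_map() below size cpu_possible_mask for 8 CPUs
 * regardless of what the MP tables or ACPI report (still clamped by
 * nr_cpu_ids, as the function's checks show).
 */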
1159
1160
1161/*
 1162 * cpu_possible_mask should be static; it cannot change as CPUs are
 1163 * onlined or offlined. The reason is that per-cpu data structures
 1164 * are allocated by some modules at init time and don't expect to
 1165 * grow or shrink dynamically on CPU arrival/departure.
 1166 * cpu_present_mask, on the other hand, can change dynamically.
 1167 * When cpu_hotplug is not compiled in, we fall back to the current
 1168 * behaviour, which is cpu_possible == cpu_present.
1169 * - Ashok Raj
1170 *
1171 * Three ways to find out the number of additional hotplug CPUs:
 1172 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 1173 * - The user can override it with possible_cpus=NUM.
1174 * - Otherwise don't reserve additional CPUs.
1175 * We do this because additional CPUs waste a lot of memory.
1176 * -AK
1177 */
1178__init void prefill_possible_map(void)
1179{
1180	int i, possible;
1181
1182	/* no processor from mptable or madt */
1183	if (!num_processors)
1184		num_processors = 1;
1185
1186	i = setup_max_cpus ?: 1;
1187	if (setup_possible_cpus == -1) {
1188		possible = num_processors;
1189#ifdef CONFIG_HOTPLUG_CPU
1190		if (setup_max_cpus)
1191			possible += disabled_cpus;
1192#else
1193		if (possible > i)
1194			possible = i;
1195#endif
1196	} else
1197		possible = setup_possible_cpus;
1198
1199	total_cpus = max_t(int, possible, num_processors + disabled_cpus);
1200
1201	/* nr_cpu_ids could be reduced via nr_cpus= */
1202	if (possible > nr_cpu_ids) {
1203		printk(KERN_WARNING
1204			"%d Processors exceeds NR_CPUS limit of %d\n",
1205			possible, nr_cpu_ids);
1206		possible = nr_cpu_ids;
1207	}
1208
1209#ifdef CONFIG_HOTPLUG_CPU
1210	if (!setup_max_cpus)
1211#endif
1212	if (possible > i) {
1213		printk(KERN_WARNING
1214			"%d Processors exceeds max_cpus limit of %u\n",
1215			possible, setup_max_cpus);
1216		possible = i;
1217	}
1218
1219	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
1220		possible, max_t(int, possible - num_processors, 0));
1221
1222	for (i = 0; i < possible; i++)
1223		set_cpu_possible(i, true);
1224	for (; i < NR_CPUS; i++)
1225		set_cpu_possible(i, false);
1226
1227	nr_cpu_ids = possible;
1228}
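
/*
 * A worked example of the sizing rules above, with illustrative numbers.
 * On a CONFIG_HOTPLUG_CPU kernel whose ACPI tables list 4 enabled and
 * 2 disabled CPUs, booted with a large NR_CPUS and with neither
 * possible_cpus= nor nr_cpus= on the command line:
 *
 *	num_processors = 4, disabled_cpus = 2, setup_possible_cpus = -1
 *	possible   = 4 + 2 = 6
 *	total_cpus = max(6, 4 + 2) = 6
 *
 * giving "SMP: Allowing 6 CPUs, 2 hotplug CPUs" and nr_cpu_ids = 6.
 */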
1229
1230#ifdef CONFIG_HOTPLUG_CPU
1231
1232static void remove_siblinginfo(int cpu)
1233{
1234	int sibling;
1235	struct cpuinfo_x86 *c = &cpu_data(cpu);
1236
1237	for_each_cpu(sibling, cpu_core_mask(cpu)) {
1238		cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
 1239		/*
1240		 * last thread sibling in this cpu core going down
1241		 */
1242		if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
1243			cpu_data(sibling).booted_cores--;
1244	}
1245
1246	for_each_cpu(sibling, cpu_sibling_mask(cpu))
1247		cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
1248	cpumask_clear(cpu_sibling_mask(cpu));
1249	cpumask_clear(cpu_core_mask(cpu));
1250	c->phys_proc_id = 0;
1251	c->cpu_core_id = 0;
1252	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
1253}
1254
1255static void __ref remove_cpu_from_maps(int cpu)
1256{
1257	set_cpu_online(cpu, false);
1258	cpumask_clear_cpu(cpu, cpu_callout_mask);
1259	cpumask_clear_cpu(cpu, cpu_callin_mask);
1260	/* was set by cpu_init() */
1261	cpumask_clear_cpu(cpu, cpu_initialized_mask);
1262	numa_remove_cpu(cpu);
1263}
1264
1265void cpu_disable_common(void)
1266{
1267	int cpu = smp_processor_id();
1268
1269	remove_siblinginfo(cpu);
1270
1271	/* It's now safe to remove this processor from the online map */
1272	lock_vector_lock();
1273	remove_cpu_from_maps(cpu);
1274	unlock_vector_lock();
1275	fixup_irqs();
1276}
1277
1278int native_cpu_disable(void)
1279{
1280	int cpu = smp_processor_id();
1281
1282	/*
1283	 * Perhaps use cpufreq to drop frequency, but that could go
1284	 * into generic code.
1285	 *
1286	 * We won't take down the boot processor on i386 due to some
1287	 * interrupts only being able to be serviced by the BSP.
1288	 * Especially so if we're not using an IOAPIC	-zwane
1289	 */
1290	if (cpu == 0)
1291		return -EBUSY;
1292
1293	clear_local_APIC();
1294
1295	cpu_disable_common();
1296	return 0;
1297}
1298
1299void native_cpu_die(unsigned int cpu)
1300{
 1301	/* We don't do anything here: the idle task is faking death itself. */
1302	unsigned int i;
1303
1304	for (i = 0; i < 10; i++) {
1305		/* They ack this in play_dead by setting CPU_DEAD */
1306		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
1307			if (system_state == SYSTEM_RUNNING)
1308				pr_info("CPU %u is now offline\n", cpu);
1309
 1310			if (num_online_cpus() == 1)
1311				alternatives_smp_switch(0);
1312			return;
1313		}
1314		msleep(100);
1315	}
1316	pr_err("CPU %u didn't die...\n", cpu);
1317}
1318
1319void play_dead_common(void)
1320{
1321	idle_task_exit();
1322	reset_lazy_tlbstate();
1323	amd_e400_remove_cpu(raw_smp_processor_id());
1324
1325	mb();
1326	/* Ack it */
1327	__this_cpu_write(cpu_state, CPU_DEAD);
1328
1329	/*
1330	 * With physical CPU hotplug, we should halt the cpu
1331	 */
1332	local_irq_disable();
1333}
1334
1335/*
1336 * We need to flush the caches before going to sleep, lest we have
1337 * dirty data in our caches when we come back up.
1338 */
1339static inline void mwait_play_dead(void)
1340{
1341	unsigned int eax, ebx, ecx, edx;
1342	unsigned int highest_cstate = 0;
1343	unsigned int highest_subcstate = 0;
1344	int i;
1345	void *mwait_ptr;
1346	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
1347
1348	if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
1349		return;
1350	if (!this_cpu_has(X86_FEATURE_CLFLSH))
1351		return;
1352	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
1353		return;
1354
1355	eax = CPUID_MWAIT_LEAF;
1356	ecx = 0;
1357	native_cpuid(&eax, &ebx, &ecx, &edx);
1358
1359	/*
 1360	 * eax will be 0 if edx enumeration is not valid.
 1361	 * Otherwise it is set below to the deepest (cstate, sub_cstate) in edx.
1362	 */
1363	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
1364		eax = 0;
1365	} else {
1366		edx >>= MWAIT_SUBSTATE_SIZE;
1367		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
1368			if (edx & MWAIT_SUBSTATE_MASK) {
1369				highest_cstate = i;
1370				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
1371			}
1372		}
1373		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
1374			(highest_subcstate - 1);
1375	}
1376
1377	/*
1378	 * This should be a memory location in a cache line which is
1379	 * unlikely to be touched by other processors.  The actual
1380	 * content is immaterial as it is not actually modified in any way.
1381	 */
1382	mwait_ptr = &current_thread_info()->flags;
1383
1384	wbinvd();
1385
1386	while (1) {
1387		/*
1388		 * The CLFLUSH is a workaround for erratum AAI65 for
1389		 * the Xeon 7400 series.  It's not clear it is actually
1390		 * needed, but it should be harmless in either case.
1391		 * The WBINVD is insufficient due to the spurious-wakeup
1392		 * case where we return around the loop.
1393		 */
1394		clflush(mwait_ptr);
1395		__monitor(mwait_ptr, 0, 0);
1396		mb();
1397		__mwait(eax, 0);
1398	}
1399}
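
/*
 * A worked example of the CPUID.05H decoding above (the register value
 * is illustrative).  With edx = 0x00000120, i.e. two C1 sub-states and
 * one C2 sub-state:
 *
 *	after edx >>= MWAIT_SUBSTATE_SIZE:  edx = 0x12
 *	i = 0: edx & 0xf = 2 -> highest_cstate = 0, highest_subcstate = 2
 *	i = 1: edx & 0xf = 1 -> highest_cstate = 1, highest_subcstate = 1
 *	eax = (1 << MWAIT_SUBSTATE_SIZE) | (1 - 1) = 0x10
 *
 * so __mwait() runs with hint 0x10, which by the usual MWAIT hint
 * convention requests the deepest enumerated state (C2), sub-state 0.
 */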
1400
1401static inline void hlt_play_dead(void)
1402{
1403	if (__this_cpu_read(cpu_info.x86) >= 4)
1404		wbinvd();
1405
1406	while (1) {
1407		native_halt();
1408	}
1409}
1410
1411void native_play_dead(void)
1412{
1413	play_dead_common();
1414	tboot_shutdown(TB_SHUTDOWN_WFS);
1415
1416	mwait_play_dead();	/* Only returns on failure */
 1417	hlt_play_dead();
 1418}
1419
1420#else /* ... !CONFIG_HOTPLUG_CPU */
1421int native_cpu_disable(void)
1422{
1423	return -ENOSYS;
1424}
1425
1426void native_cpu_die(unsigned int cpu)
1427{
1428	/* We said "no" in __cpu_disable */
1429	BUG();
1430}
1431
1432void native_play_dead(void)
1433{
1434	BUG();
1435}
1436
1437#endif