v3.5.6
   1#include <linux/bootmem.h>
   2#include <linux/linkage.h>
   3#include <linux/bitops.h>
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <linux/percpu.h>
   7#include <linux/string.h>
 
   8#include <linux/delay.h>
   9#include <linux/sched.h>
  10#include <linux/init.h>
 
  11#include <linux/kgdb.h>
  12#include <linux/smp.h>
  13#include <linux/io.h>
 
  14
  15#include <asm/stackprotector.h>
  16#include <asm/perf_event.h>
  17#include <asm/mmu_context.h>
  18#include <asm/archrandom.h>
  19#include <asm/hypervisor.h>
  20#include <asm/processor.h>
 
  21#include <asm/debugreg.h>
  22#include <asm/sections.h>
 
  23#include <linux/topology.h>
  24#include <linux/cpumask.h>
  25#include <asm/pgtable.h>
  26#include <linux/atomic.h>
  27#include <asm/proto.h>
  28#include <asm/setup.h>
  29#include <asm/apic.h>
  30#include <asm/desc.h>
  31#include <asm/i387.h>
  32#include <asm/fpu-internal.h>
  33#include <asm/mtrr.h>
  34#include <linux/numa.h>
  35#include <asm/asm.h>
  36#include <asm/cpu.h>
  37#include <asm/mce.h>
  38#include <asm/msr.h>
  39#include <asm/pat.h>
  40
  41#ifdef CONFIG_X86_LOCAL_APIC
  42#include <asm/uv/uv.h>
  43#endif
  44
  45#include "cpu.h"
  46
  47/* all of these masks are initialized in setup_cpu_local_masks() */
  48cpumask_var_t cpu_initialized_mask;
  49cpumask_var_t cpu_callout_mask;
  50cpumask_var_t cpu_callin_mask;
  51
  52/* representing cpus for which sibling maps can be computed */
  53cpumask_var_t cpu_sibling_setup_mask;
  54
  55/* correctly size the local cpu masks */
  56void __init setup_cpu_local_masks(void)
  57{
  58	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
  59	alloc_bootmem_cpumask_var(&cpu_callin_mask);
  60	alloc_bootmem_cpumask_var(&cpu_callout_mask);
  61	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
  62}
  63
  64static void __cpuinit default_init(struct cpuinfo_x86 *c)
  65{
  66#ifdef CONFIG_X86_64
  67	cpu_detect_cache_sizes(c);
  68#else
  69	/* Not much we can do here... */
  70	/* Check if at least it has cpuid */
  71	if (c->cpuid_level == -1) {
  72		/* No cpuid. It must be an ancient CPU */
  73		if (c->x86 == 4)
  74			strcpy(c->x86_model_id, "486");
  75		else if (c->x86 == 3)
  76			strcpy(c->x86_model_id, "386");
  77	}
  78#endif
  79}
  80
  81static const struct cpu_dev __cpuinitconst default_cpu = {
  82	.c_init		= default_init,
  83	.c_vendor	= "Unknown",
  84	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
  85};
  86
  87static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
  88
  89DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
  90#ifdef CONFIG_X86_64
  91	/*
  92	 * We need valid kernel segments for data and code in long mode too
  93	 * IRET will check the segment types  kkeil 2000/10/28
  94	 * Also sysret mandates a special GDT layout
  95	 *
  96	 * TLS descriptors are currently at a different place compared to i386.
  97	 * Hopefully nobody expects them at a fixed place (Wine?)
  98	 */
  99	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
 100	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
 101	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
 102	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
 103	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
 104	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
 105#else
 106	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
 107	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 108	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
 109	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
 110	/*
 111	 * Segments used for calling PnP BIOS have byte granularity.
  112	 * The code and data segments have fixed 64k limits;
  113	 * the transfer segment sizes are set at run time.
 114	 */
 115	/* 32-bit code */
 116	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
 117	/* 16-bit code */
 118	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
 119	/* 16-bit data */
 120	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
 121	/* 16-bit data */
 122	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
 123	/* 16-bit data */
 124	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
 125	/*
 126	 * The APM segments have byte granularity and their bases
 127	 * are set at run time.  All have 64k limits.
 128	 */
 129	/* 32-bit code */
 130	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
 131	/* 16-bit code */
 132	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
 133	/* data */
 134	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),
 135
 136	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 137	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 138	GDT_STACK_CANARY_INIT
 139#endif
 140} };
 141EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
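The 16-bit value passed as the first argument to GDT_ENTRY_INIT() above packs the descriptor's access byte into its low 8 bits and the G/DB/L/AVL flag nibble into bits 15:12. A minimal standalone sketch of that decoding, assuming this packing and using the 32-bit kernel code pattern 0xc09b as input (plain user-space C for illustration, not kernel API):

	/* decode the flags argument of GDT_ENTRY_INIT(0xc09b, 0, 0xfffff) */
	#include <stdio.h>

	int main(void)
	{
		unsigned int flags = 0xc09b;

		unsigned int access = flags & 0xff;      /* 0x9b: present, DPL 0, code, readable, accessed */
		unsigned int g      = (flags >> 15) & 1; /* granularity: limit counted in 4 KiB units */
		unsigned int db     = (flags >> 14) & 1; /* default operand size: 32-bit */
		unsigned int l      = (flags >> 13) & 1; /* long-mode bit: 0 here, set in the 0xa09b entries */

		printf("access=%#x G=%u D/B=%u L=%u\n", access, g, db, l);
		return 0;
	}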
 142
 143static int __init x86_xsave_setup(char *s)
 144{
 145	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
 146	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
 147	setup_clear_cpu_cap(X86_FEATURE_AVX);
 148	setup_clear_cpu_cap(X86_FEATURE_AVX2);
 149	return 1;
 150}
 151__setup("noxsave", x86_xsave_setup);
 152
 153static int __init x86_xsaveopt_setup(char *s)
 154{
 155	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
 156	return 1;
 157}
 158__setup("noxsaveopt", x86_xsaveopt_setup);
 159
 160#ifdef CONFIG_X86_32
 161static int cachesize_override __cpuinitdata = -1;
 162static int disable_x86_serial_nr __cpuinitdata = 1;
 163
 164static int __init cachesize_setup(char *str)
 165{
 166	get_option(&str, &cachesize_override);
 167	return 1;
 168}
 169__setup("cachesize=", cachesize_setup);
 170
 171static int __init x86_fxsr_setup(char *s)
 172{
 173	setup_clear_cpu_cap(X86_FEATURE_FXSR);
 174	setup_clear_cpu_cap(X86_FEATURE_XMM);
 175	return 1;
 176}
 177__setup("nofxsr", x86_fxsr_setup);
 178
 179static int __init x86_sep_setup(char *s)
 180{
 181	setup_clear_cpu_cap(X86_FEATURE_SEP);
 182	return 1;
 183}
 184__setup("nosep", x86_sep_setup);
 185
 186/* Standard macro to see if a specific flag is changeable */
 187static inline int flag_is_changeable_p(u32 flag)
 188{
 189	u32 f1, f2;
 190
 191	/*
 192	 * Cyrix and IDT cpus allow disabling of CPUID
 193	 * so the code below may return different results
 194	 * when it is executed before and after enabling
 195	 * the CPUID. Add "volatile" to not allow gcc to
 196	 * optimize the subsequent calls to this function.
 197	 */
 198	asm volatile ("pushfl		\n\t"
 199		      "pushfl		\n\t"
 200		      "popl %0		\n\t"
 201		      "movl %0, %1	\n\t"
 202		      "xorl %2, %0	\n\t"
 203		      "pushl %0		\n\t"
 204		      "popfl		\n\t"
 205		      "pushfl		\n\t"
 206		      "popl %0		\n\t"
 207		      "popfl		\n\t"
 208
 209		      : "=&r" (f1), "=&r" (f2)
 210		      : "ir" (flag));
 211
 212	return ((f1^f2) & flag) != 0;
 213}
 214
 215/* Probe for the CPUID instruction */
 216static int __cpuinit have_cpuid_p(void)
 217{
 218	return flag_is_changeable_p(X86_EFLAGS_ID);
 219}
 220
 221static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 222{
 223	unsigned long lo, hi;
 224
 225	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
 226		return;
 227
 228	/* Disable processor serial number: */
 229
 230	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 231	lo |= 0x200000;
 232	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 233
 234	printk(KERN_NOTICE "CPU serial number disabled.\n");
 235	clear_cpu_cap(c, X86_FEATURE_PN);
 236
 237	/* Disabling the serial number may affect the cpuid level */
 238	c->cpuid_level = cpuid_eax(0);
 239}
 240
 241static int __init x86_serial_nr_setup(char *s)
 242{
 243	disable_x86_serial_nr = 0;
 244	return 1;
 245}
 246__setup("serialnumber", x86_serial_nr_setup);
 247#else
 248static inline int flag_is_changeable_p(u32 flag)
 249{
 250	return 1;
 251}
 252/* Probe for the CPUID instruction */
 253static inline int have_cpuid_p(void)
 254{
 255	return 1;
 256}
 257static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 258{
 259}
 260#endif
 261
 262static int disable_smep __cpuinitdata;
 263static __init int setup_disable_smep(char *arg)
 264{
 265	disable_smep = 1;
 266	return 1;
 267}
 268__setup("nosmep", setup_disable_smep);
 269
 270static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
 271{
 272	if (cpu_has(c, X86_FEATURE_SMEP)) {
 273		if (unlikely(disable_smep)) {
 274			setup_clear_cpu_cap(X86_FEATURE_SMEP);
 275			clear_in_cr4(X86_CR4_SMEP);
 276		} else
 277			set_in_cr4(X86_CR4_SMEP);
 278	}
 279}
 280
 281/*
 282 * Some CPU features depend on higher CPUID levels, which may not always
 283 * be available due to CPUID level capping or broken virtualization
 284 * software.  Add those features to this table to auto-disable them.
 285 */
 286struct cpuid_dependent_feature {
 287	u32 feature;
 288	u32 level;
 289};
 290
 291static const struct cpuid_dependent_feature __cpuinitconst
 292cpuid_dependent_features[] = {
 293	{ X86_FEATURE_MWAIT,		0x00000005 },
 294	{ X86_FEATURE_DCA,		0x00000009 },
 295	{ X86_FEATURE_XSAVE,		0x0000000d },
 296	{ 0, 0 }
 297};
 298
 299static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 300{
 301	const struct cpuid_dependent_feature *df;
 302
 303	for (df = cpuid_dependent_features; df->feature; df++) {
 304
 305		if (!cpu_has(c, df->feature))
 306			continue;
 307		/*
 308		 * Note: cpuid_level is set to -1 if unavailable, but
  309		 * extended_cpuid_level is set to 0 if unavailable
 310		 * and the legitimate extended levels are all negative
 311		 * when signed; hence the weird messing around with
 312		 * signs here...
 313		 */
 314		if (!((s32)df->level < 0 ?
 315		     (u32)df->level > (u32)c->extended_cpuid_level :
 316		     (s32)df->level > (s32)c->cpuid_level))
 317			continue;
 318
 319		clear_cpu_cap(c, df->feature);
 320		if (!warn)
 321			continue;
 322
 323		printk(KERN_WARNING
 324		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
 325				x86_cap_flags[df->feature], df->level);
 326	}
 327}
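As a concrete illustration of the level check above (with hypothetical numbers): a guest whose hypervisor caps the basic CPUID leaves at 0xa but still advertises XSAVE would match the df->level = 0xd entry, and the feature would be cleared. A standalone sketch of just that comparison:

	#include <stdio.h>

	int main(void)
	{
		int cpuid_level = 0x0000000a;        /* hypothetical: basic leaves capped by a hypervisor */
		unsigned int ext_level = 0x80000008; /* hypothetical extended_cpuid_level */
		unsigned int level = 0x0000000d;     /* X86_FEATURE_XSAVE depends on leaf 0xd */

		/* same signed/unsigned split as filter_cpuid_features() */
		if ((int)level < 0 ? level > ext_level : (int)level > cpuid_level)
			printf("feature disabled, no CPUID level 0x%x\n", level);
		return 0;
	}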
 328
 329/*
 330 * Naming convention should be: <Name> [(<Codename>)]
  331 * This table is only used if init_<vendor>() below doesn't set it;
 332 * in particular, if CPUID levels 0x80000002..4 are supported, this
 333 * isn't used
 334 */
 335
 336/* Look up CPU names by table lookup. */
 337static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 338{
 339	const struct cpu_model_info *info;
 
 340
 341	if (c->x86_model >= 16)
 342		return NULL;	/* Range check */
 343
 344	if (!this_cpu)
 345		return NULL;
 346
 347	info = this_cpu->c_models;
 348
 349	while (info && info->family) {
 350		if (info->family == c->x86)
 351			return info->model_names[c->x86_model];
 352		info++;
 353	}
 
 354	return NULL;		/* Not found */
 355}
 356
 357__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
 358__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;
 359
 360void load_percpu_segment(int cpu)
 361{
 362#ifdef CONFIG_X86_32
 363	loadsegment(fs, __KERNEL_PERCPU);
 364#else
 365	loadsegment(gs, 0);
 366	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 367#endif
 368	load_stack_canary_segment();
 369}
 370
 371/*
 372 * Current gdt points %fs at the "master" per-cpu area: after this,
 373 * it's on the real one.
 374 */
 375void switch_to_new_gdt(int cpu)
 376{
 377	struct desc_ptr gdt_descr;
 378
 379	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 380	gdt_descr.size = GDT_SIZE - 1;
 381	load_gdt(&gdt_descr);
 382	/* Reload the per-cpu base */
 383
 384	load_percpu_segment(cpu);
 385}
 386
 387static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 388
 389static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 390{
 391	unsigned int *v;
 392	char *p, *q;
 393
 394	if (c->extended_cpuid_level < 0x80000004)
 395		return;
 396
 397	v = (unsigned int *)c->x86_model_id;
 398	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 399	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 400	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 401	c->x86_model_id[48] = 0;
 402
 403	/*
 404	 * Intel chips right-justify this string for some dumb reason;
 405	 * undo that brain damage:
 406	 */
 407	p = q = &c->x86_model_id[0];
 408	while (*p == ' ')
 409		p++;
 410	if (p != q) {
 411		while (*p)
 412			*q++ = *p++;
 413		while (q <= &c->x86_model_id[48])
 414			*q++ = '\0';	/* Zero-pad the rest */
 415	}
 416}
 417
 418void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 419{
 420	unsigned int n, dummy, ebx, ecx, edx, l2size;
 421
 422	n = c->extended_cpuid_level;
 423
 424	if (n >= 0x80000005) {
 425		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
 426		c->x86_cache_size = (ecx>>24) + (edx>>24);
 427#ifdef CONFIG_X86_64
 428		/* On K8 L1 TLB is inclusive, so don't count it */
 429		c->x86_tlbsize = 0;
 430#endif
 431	}
 432
  433	if (n < 0x80000006)	/* Some chips just have a large L1. */
 434		return;
 435
 436	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
 437	l2size = ecx >> 16;
 438
 439#ifdef CONFIG_X86_64
 440	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
 441#else
 442	/* do processor-specific cache resizing */
 443	if (this_cpu->c_size_cache)
 444		l2size = this_cpu->c_size_cache(c, l2size);
 445
 446	/* Allow user to override all this if necessary. */
 447	if (cachesize_override != -1)
 448		l2size = cachesize_override;
 449
 450	if (l2size == 0)
 451		return;		/* Again, no L2 cache is possible */
 452#endif
 453
 454	c->x86_cache_size = l2size;
 455}
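For reference, the l2size computation above relies on the AMD-defined layout of leaf 0x80000006, where ECX[31:16] is the unified L2 size in KB. A tiny standalone example with a made-up ECX value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ecx = 0x02006140;   /* hypothetical CPUID 0x80000006 ECX */
		unsigned int l2size = ecx >> 16; /* 0x0200 = 512 KB */
		unsigned int line   = ecx & 0xff;/* 0x40 = 64-byte cache lines */

		printf("L2: %u KB, %u-byte lines\n", l2size, line);
		return 0;
	}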
 456
 457void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 458{
 459#ifdef CONFIG_X86_HT
 460	u32 eax, ebx, ecx, edx;
 461	int index_msb, core_bits;
 462	static bool printed;
 463
 464	if (!cpu_has(c, X86_FEATURE_HT))
 465		return;
 466
 467	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 468		goto out;
 469
 470	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
 471		return;
 472
 473	cpuid(1, &eax, &ebx, &ecx, &edx);
 474
 475	smp_num_siblings = (ebx & 0xff0000) >> 16;
 476
 477	if (smp_num_siblings == 1) {
 478		printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
 479		goto out;
 480	}
 481
 482	if (smp_num_siblings <= 1)
 483		goto out;
 484
 485	index_msb = get_count_order(smp_num_siblings);
 486	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 487
 488	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 489
 490	index_msb = get_count_order(smp_num_siblings);
 491
 492	core_bits = get_count_order(c->x86_max_cores);
 493
 494	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 495				       ((1 << core_bits) - 1);
 496
 497out:
 498	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
 499		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
 500		       c->phys_proc_id);
 501		printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
 502		       c->cpu_core_id);
 503		printed = 1;
 504	}
 505#endif
 506}
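A worked example of the APIC-ID decomposition above, using hypothetical numbers (8 logical CPUs per package, 4 cores, hence 2 threads per core) and assuming the common APIC drivers, where phys_pkg_id() reduces to a right shift of the initial APIC ID; get_count_order() is open-coded here:

	#include <stdio.h>

	static int count_order(unsigned int n)	/* ceil(log2(n)), like get_count_order() */
	{
		int order = 0;

		while ((1u << order) < n)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned int smp_num_siblings = 8;	/* CPUID.1 EBX[23:16], hypothetical */
		unsigned int x86_max_cores = 4;
		unsigned int initial_apicid = 0x0b;	/* hypothetical */
		int index_msb, core_bits;

		index_msb = count_order(smp_num_siblings);			/* 3 */
		printf("phys_proc_id = %u\n", initial_apicid >> index_msb);	/* 1 */

		smp_num_siblings /= x86_max_cores;				/* 2 threads per core */
		index_msb = count_order(smp_num_siblings);			/* 1 */
		core_bits = count_order(x86_max_cores);				/* 2 */
		printf("cpu_core_id  = %u\n",
		       (initial_apicid >> index_msb) & ((1 << core_bits) - 1));	/* 1 */
		return 0;
	}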
 507
 508static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 509{
 510	char *v = c->x86_vendor_id;
 511	int i;
 512
 513	for (i = 0; i < X86_VENDOR_NUM; i++) {
 514		if (!cpu_devs[i])
 515			break;
 516
 517		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 518		    (cpu_devs[i]->c_ident[1] &&
 519		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 520
 521			this_cpu = cpu_devs[i];
 522			c->x86_vendor = this_cpu->c_x86_vendor;
 523			return;
 524		}
 525	}
 526
 527	printk_once(KERN_ERR
 528			"CPU: vendor_id '%s' unknown, using generic init.\n" \
 529			"CPU: Your system may be unstable.\n", v);
 530
 531	c->x86_vendor = X86_VENDOR_UNKNOWN;
 532	this_cpu = &default_cpu;
 533}
 534
 535void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 536{
 537	/* Get vendor name */
 538	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
 539	      (unsigned int *)&c->x86_vendor_id[0],
 540	      (unsigned int *)&c->x86_vendor_id[8],
 541	      (unsigned int *)&c->x86_vendor_id[4]);
 542
 543	c->x86 = 4;
 544	/* Intel-defined flags: level 0x00000001 */
 545	if (c->cpuid_level >= 0x00000001) {
 546		u32 junk, tfms, cap0, misc;
 547
 548		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 549		c->x86 = (tfms >> 8) & 0xf;
 550		c->x86_model = (tfms >> 4) & 0xf;
 551		c->x86_mask = tfms & 0xf;
 552
 553		if (c->x86 == 0xf)
 554			c->x86 += (tfms >> 20) & 0xff;
 555		if (c->x86 >= 0x6)
 556			c->x86_model += ((tfms >> 16) & 0xf) << 4;
 557
 558		if (cap0 & (1<<19)) {
 559			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 560			c->x86_cache_alignment = c->x86_clflush_size;
 561		}
 562	}
 563}
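A worked example of the family/model/stepping decode above, feeding it the publicly documented Ivy Bridge signature 0x000306a9 (the extended model is folded into the model for family >= 6):

	#include <stdio.h>

	int main(void)
	{
		unsigned int tfms = 0x000306a9;	/* example CPUID.1 EAX */
		unsigned int family   = (tfms >> 8) & 0xf;
		unsigned int model    = (tfms >> 4) & 0xf;
		unsigned int stepping = tfms & 0xf;

		if (family == 0xf)
			family += (tfms >> 20) & 0xff;
		if (family >= 0x6)
			model += ((tfms >> 16) & 0xf) << 4;

		/* prints: family 6, model 0x3a, stepping 9 */
		printf("family %u, model %#x, stepping %u\n", family, model, stepping);
		return 0;
	}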
 564
 565void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 566{
 567	u32 tfms, xlvl;
 568	u32 ebx;
 569
 570	/* Intel-defined flags: level 0x00000001 */
 571	if (c->cpuid_level >= 0x00000001) {
 572		u32 capability, excap;
 573
 574		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
 575		c->x86_capability[0] = capability;
 576		c->x86_capability[4] = excap;
 577	}
 578
 579	/* Additional Intel-defined flags: level 0x00000007 */
 580	if (c->cpuid_level >= 0x00000007) {
 581		u32 eax, ebx, ecx, edx;
 582
 583		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
 584
 585		c->x86_capability[9] = ebx;
 586	}
 587
 588	/* AMD-defined flags: level 0x80000001 */
 589	xlvl = cpuid_eax(0x80000000);
 590	c->extended_cpuid_level = xlvl;
 591
 592	if ((xlvl & 0xffff0000) == 0x80000000) {
 593		if (xlvl >= 0x80000001) {
 594			c->x86_capability[1] = cpuid_edx(0x80000001);
 595			c->x86_capability[6] = cpuid_ecx(0x80000001);
 596		}
 597	}
 598
 599	if (c->extended_cpuid_level >= 0x80000008) {
 600		u32 eax = cpuid_eax(0x80000008);
 601
 602		c->x86_virt_bits = (eax >> 8) & 0xff;
 603		c->x86_phys_bits = eax & 0xff;
 
 604	}
 605#ifdef CONFIG_X86_32
 606	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
 607		c->x86_phys_bits = 36;
 608#endif
 609
 610	if (c->extended_cpuid_level >= 0x80000007)
 611		c->x86_power = cpuid_edx(0x80000007);
 612
 613	init_scattered_cpuid_features(c);
 614}
 615
 616static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 617{
 618#ifdef CONFIG_X86_32
 619	int i;
 620
 621	/*
 622	 * First of all, decide if this is a 486 or higher
 623	 * It's a 486 if we can modify the AC flag
 624	 */
 625	if (flag_is_changeable_p(X86_EFLAGS_AC))
 626		c->x86 = 4;
 627	else
 628		c->x86 = 3;
 629
 630	for (i = 0; i < X86_VENDOR_NUM; i++)
 631		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
 632			c->x86_vendor_id[0] = 0;
 633			cpu_devs[i]->c_identify(c);
 634			if (c->x86_vendor_id[0]) {
 635				get_cpu_vendor(c);
 636				break;
 637			}
 638		}
 639#endif
 640}
 641
 642/*
 643 * Do minimum CPU detection early.
 644 * Fields really needed: vendor, cpuid_level, family, model, mask,
 645 * cache alignment.
 646 * The others are not touched to avoid unwanted side effects.
 647 *
 648 * WARNING: this function is only called on the BP.  Don't add code here
 649 * that is supposed to run on all CPUs.
 650 */
 651static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 652{
 653#ifdef CONFIG_X86_64
 654	c->x86_clflush_size = 64;
 655	c->x86_phys_bits = 36;
 656	c->x86_virt_bits = 48;
 657#else
 658	c->x86_clflush_size = 32;
 659	c->x86_phys_bits = 32;
 660	c->x86_virt_bits = 32;
 661#endif
 662	c->x86_cache_alignment = c->x86_clflush_size;
 663
 664	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 665	c->extended_cpuid_level = 0;
 666
 667	if (!have_cpuid_p())
 668		identify_cpu_without_cpuid(c);
 669
 670	/* cyrix could have cpuid enabled via c_identify()*/
 671	if (!have_cpuid_p())
 672		return;
 673
 674	cpu_detect(c);
 675
 676	get_cpu_vendor(c);
 677
 678	get_cpu_cap(c);
 679
 680	if (this_cpu->c_early_init)
 681		this_cpu->c_early_init(c);
 682
 683	c->cpu_index = 0;
 684	filter_cpuid_features(c, false);
 685
 686	setup_smep(c);
 687
 688	if (this_cpu->c_bsp_init)
 689		this_cpu->c_bsp_init(c);
 690}
 691
 692void __init early_cpu_init(void)
 693{
 694	const struct cpu_dev *const *cdev;
 695	int count = 0;
 696
 697#ifdef CONFIG_PROCESSOR_SELECT
 698	printk(KERN_INFO "KERNEL supported cpus:\n");
 699#endif
 700
 701	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
 702		const struct cpu_dev *cpudev = *cdev;
 703
 704		if (count >= X86_VENDOR_NUM)
 705			break;
 706		cpu_devs[count] = cpudev;
 707		count++;
 708
 709#ifdef CONFIG_PROCESSOR_SELECT
 710		{
 711			unsigned int j;
 712
 713			for (j = 0; j < 2; j++) {
 714				if (!cpudev->c_ident[j])
 715					continue;
 716				printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
 717					cpudev->c_ident[j]);
 718			}
 719		}
 720#endif
 721	}
 722	early_identify_cpu(&boot_cpu_data);
 723}
 724
 725/*
 726 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 727 * unfortunately, that's not true in practice because of early VIA
 728 * chips and (more importantly) broken virtualizers that are not easy
 729 * to detect. In the latter case it doesn't even *fail* reliably, so
 730 * probing for it doesn't even work. Disable it completely on 32-bit
 731 * unless we can find a reliable way to detect all the broken cases.
 732 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 733 */
 734static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 735{
 736#ifdef CONFIG_X86_32
 737	clear_cpu_cap(c, X86_FEATURE_NOPL);
 738#else
 739	set_cpu_cap(c, X86_FEATURE_NOPL);
 740#endif
 741}
 742
 743static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 744{
 745	c->extended_cpuid_level = 0;
 746
 747	if (!have_cpuid_p())
 748		identify_cpu_without_cpuid(c);
 749
 750	/* cyrix could have cpuid enabled via c_identify()*/
 751	if (!have_cpuid_p())
 752		return;
 753
 754	cpu_detect(c);
 755
 756	get_cpu_vendor(c);
 757
 758	get_cpu_cap(c);
 759
 760	if (c->cpuid_level >= 0x00000001) {
 761		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 762#ifdef CONFIG_X86_32
 763# ifdef CONFIG_X86_HT
 764		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 765# else
 766		c->apicid = c->initial_apicid;
 767# endif
 768#endif
 769		c->phys_proc_id = c->initial_apicid;
 770	}
 771
 772	setup_smep(c);
 773
 774	get_model_name(c); /* Default name */
 775
 776	detect_nopl(c);
 777}
 778
 779/*
 780 * This does the hard work of actually picking apart the CPU stuff...
 781 */
 782static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 783{
 784	int i;
 785
 786	c->loops_per_jiffy = loops_per_jiffy;
 787	c->x86_cache_size = -1;
 788	c->x86_vendor = X86_VENDOR_UNKNOWN;
 789	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
 790	c->x86_vendor_id[0] = '\0'; /* Unset */
 791	c->x86_model_id[0] = '\0';  /* Unset */
 792	c->x86_max_cores = 1;
 793	c->x86_coreid_bits = 0;
 794#ifdef CONFIG_X86_64
 795	c->x86_clflush_size = 64;
 796	c->x86_phys_bits = 36;
 797	c->x86_virt_bits = 48;
 798#else
 799	c->cpuid_level = -1;	/* CPUID not detected */
 800	c->x86_clflush_size = 32;
 801	c->x86_phys_bits = 32;
 802	c->x86_virt_bits = 32;
 803#endif
 804	c->x86_cache_alignment = c->x86_clflush_size;
 805	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 806
 807	generic_identify(c);
 808
 809	if (this_cpu->c_identify)
 810		this_cpu->c_identify(c);
 811
  812	/* Clear/Set all flags overridden by options, after probe */
 813	for (i = 0; i < NCAPINTS; i++) {
 814		c->x86_capability[i] &= ~cpu_caps_cleared[i];
 815		c->x86_capability[i] |= cpu_caps_set[i];
 816	}
 817
 818#ifdef CONFIG_X86_64
 819	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 820#endif
 821
 822	/*
 823	 * Vendor-specific initialization.  In this section we
 824	 * canonicalize the feature flags, meaning if there are
 825	 * features a certain CPU supports which CPUID doesn't
 826	 * tell us, CPUID claiming incorrect flags, or other bugs,
 827	 * we handle them here.
 828	 *
 829	 * At the end of this section, c->x86_capability better
 830	 * indicate the features this CPU genuinely supports!
 831	 */
 832	if (this_cpu->c_init)
 833		this_cpu->c_init(c);
 834
 835	/* Disable the PN if appropriate */
 836	squash_the_stupid_serial_number(c);
 837
 838	/*
 839	 * The vendor-specific functions might have changed features.
 840	 * Now we do "generic changes."
 841	 */
 842
 843	/* Filter out anything that depends on CPUID levels we don't have */
 844	filter_cpuid_features(c, true);
 845
 846	/* If the model name is still unset, do table lookup. */
 847	if (!c->x86_model_id[0]) {
 848		const char *p;
 849		p = table_lookup_model(c);
 850		if (p)
 851			strcpy(c->x86_model_id, p);
 852		else
 853			/* Last resort... */
 854			sprintf(c->x86_model_id, "%02x/%02x",
 855				c->x86, c->x86_model);
 856	}
 857
 858#ifdef CONFIG_X86_64
 859	detect_ht(c);
 860#endif
 861
 862	init_hypervisor(c);
 863	x86_init_rdrand(c);
 864
  865	/*
  866	 * Clear/Set all flags overridden by options; this needs to be
  867	 * done before the SMP AND of all CPUs' capabilities below.
  868	 */
 869	for (i = 0; i < NCAPINTS; i++) {
 870		c->x86_capability[i] &= ~cpu_caps_cleared[i];
 871		c->x86_capability[i] |= cpu_caps_set[i];
 872	}
 873
 874	/*
 875	 * On SMP, boot_cpu_data holds the common feature set between
 876	 * all CPUs; so make sure that we indicate which features are
 877	 * common between the CPUs.  The first time this routine gets
 878	 * executed, c == &boot_cpu_data.
 879	 */
 880	if (c != &boot_cpu_data) {
 881		/* AND the already accumulated flags with these */
 882		for (i = 0; i < NCAPINTS; i++)
 883			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 884	}
 885
 886	/* Init Machine Check Exception if available. */
 887	mcheck_cpu_init(c);
 888
 889	select_idle_routine(c);
 890
 891#ifdef CONFIG_NUMA
 892	numa_add_cpu(smp_processor_id());
 893#endif
 894}
 895
 896#ifdef CONFIG_X86_64
 897static void vgetcpu_set_mode(void)
 898{
 899	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
 900		vgetcpu_mode = VGETCPU_RDTSCP;
 901	else
 902		vgetcpu_mode = VGETCPU_LSL;
 903}
 904#endif
 905
 906void __init identify_boot_cpu(void)
 907{
 908	identify_cpu(&boot_cpu_data);
 909	init_amd_e400_c1e_mask();
 910#ifdef CONFIG_X86_32
 911	sysenter_setup();
 912	enable_sep_cpu();
 913#else
 914	vgetcpu_set_mode();
 915#endif
 
 916}
 917
 918void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 919{
 920	BUG_ON(c == &boot_cpu_data);
 921	identify_cpu(c);
 922#ifdef CONFIG_X86_32
 923	enable_sep_cpu();
 924#endif
 925	mtrr_ap_init();
 926}
 927
 928struct msr_range {
 929	unsigned	min;
 930	unsigned	max;
 931};
 932
 933static const struct msr_range msr_range_array[] __cpuinitconst = {
 934	{ 0x00000000, 0x00000418},
 935	{ 0xc0000000, 0xc000040b},
 936	{ 0xc0010000, 0xc0010142},
 937	{ 0xc0011000, 0xc001103b},
 938};
 939
 940static void __cpuinit __print_cpu_msr(void)
 941{
 942	unsigned index_min, index_max;
 943	unsigned index;
 944	u64 val;
 945	int i;
 946
 947	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
 948		index_min = msr_range_array[i].min;
 949		index_max = msr_range_array[i].max;
 950
 951		for (index = index_min; index < index_max; index++) {
 952			if (rdmsrl_amd_safe(index, &val))
 953				continue;
 954			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
 955		}
 956	}
 957}
 958
 959static int show_msr __cpuinitdata;
 960
 961static __init int setup_show_msr(char *arg)
 962{
 963	int num;
 964
 965	get_option(&arg, &num);
 966
 967	if (num > 0)
 968		show_msr = num;
 969	return 1;
 970}
 971__setup("show_msr=", setup_show_msr);
 972
 973static __init int setup_noclflush(char *arg)
 974{
 975	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
 
 976	return 1;
 977}
 978__setup("noclflush", setup_noclflush);
 979
 980void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 981{
 982	const char *vendor = NULL;
 983
 984	if (c->x86_vendor < X86_VENDOR_NUM) {
 985		vendor = this_cpu->c_vendor;
 986	} else {
 987		if (c->cpuid_level >= 0)
 988			vendor = c->x86_vendor_id;
 989	}
 990
 991	if (vendor && !strstr(c->x86_model_id, vendor))
 992		printk(KERN_CONT "%s ", vendor);
 993
 994	if (c->x86_model_id[0])
 995		printk(KERN_CONT "%s", c->x86_model_id);
 996	else
 997		printk(KERN_CONT "%d86", c->x86);
 998
 999	if (c->x86_mask || c->cpuid_level >= 0)
1000		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
1001	else
1002		printk(KERN_CONT "\n");
1003
1004	print_cpu_msr(c);
1005}
1006
1007void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
1008{
1009	if (c->cpu_index < show_msr)
1010		__print_cpu_msr();
1011}
1012
1013static __init int setup_disablecpuid(char *arg)
1014{
1015	int bit;
1016
1017	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
1018		setup_clear_cpu_cap(bit);
1019	else
1020		return 0;
1021
1022	return 1;
1023}
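For orientation, the bit number accepted by clearcpuid= indexes the x86_capability words linearly, so a value N clears bit N%32 of word N/32. The sketch below only shows that mapping for a hypothetical N; it does not name a specific feature bit:

	#include <stdio.h>

	int main(void)
	{
		int bit = 154;	/* hypothetical clearcpuid= value */

		printf("clearcpuid=%d clears x86_capability[%d] bit %d\n",
		       bit, bit / 32, bit % 32);
		return 0;
	}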
1024__setup("clearcpuid=", setup_disablecpuid);
1025
1026#ifdef CONFIG_X86_64
1027struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
1028struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
1029				    (unsigned long) nmi_idt_table };
1030
1031DEFINE_PER_CPU_FIRST(union irq_stack_union,
1032		     irq_stack_union) __aligned(PAGE_SIZE);
1033
1034/*
1035 * The following four percpu variables are hot.  Align current_task to
1036 * cacheline size such that all four fall in the same cacheline.
1037 */
1038DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
1039	&init_task;
1040EXPORT_PER_CPU_SYMBOL(current_task);
1041
1042DEFINE_PER_CPU(unsigned long, kernel_stack) =
1043	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
1044EXPORT_PER_CPU_SYMBOL(kernel_stack);
1045
1046DEFINE_PER_CPU(char *, irq_stack_ptr) =
1047	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
1048
1049DEFINE_PER_CPU(unsigned int, irq_count) = -1;
1050
1051DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
 
1052
1053/*
1054 * Special IST stacks which the CPU switches to when it calls
1055 * an IST-marked descriptor entry. Up to 7 stacks (hardware
1056 * limit), all of them are 4K, except the debug stack which
1057 * is 8K.
1058 */
1059static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
1060	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
1061	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
1062};
1063
1064static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
1065	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
1066
1067/* May not be marked __init: used by software suspend */
1068void syscall_init(void)
1069{
1070	/*
 1071	 * LSTAR and STAR live in a somewhat strange symbiosis.
 1072	 * They both write to the same internal register. STAR allows setting
 1073	 * CS/DS, but only a 32-bit target. LSTAR sets the 64-bit rip.
1074	 */
1075	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
1076	wrmsrl(MSR_LSTAR, system_call);
1077	wrmsrl(MSR_CSTAR, ignore_sysret);
1078
1079#ifdef CONFIG_IA32_EMULATION
1080	syscall32_cpu_init();
1081#endif
1082
1083	/* Flags to clear on syscall */
1084	wrmsrl(MSR_SYSCALL_MASK,
1085	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
 
1086}
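The MSR_STAR write above places the 32-bit user CS base selector in bits 63:48 (consumed by SYSRET) and the kernel CS selector in bits 47:32 (consumed by SYSCALL). A standalone sketch of that packing, using the selector values implied by the 64-bit GDT layout earlier in this file (treat them as illustrative):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t user32_cs = 0x23;	/* GDT entry 4, RPL 3 */
		uint16_t kernel_cs = 0x10;	/* GDT entry 2, RPL 0 */
		uint64_t star = ((uint64_t)user32_cs << 48) | ((uint64_t)kernel_cs << 32);

		/* SYSCALL loads CS/SS from bits 47:32, SYSRET derives them from bits 63:48 */
		printf("MSR_STAR = %#018llx\n", (unsigned long long)star);
		return 0;
	}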
1087
1088unsigned long kernel_eflags;
1089
1090/*
1091 * Copies of the original ist values from the tss are only accessed during
1092 * debugging, no special alignment required.
1093 */
1094DEFINE_PER_CPU(struct orig_ist, orig_ist);
1095
1096static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
1097DEFINE_PER_CPU(int, debug_stack_usage);
1098
1099int is_debug_stack(unsigned long addr)
1100{
1101	return __get_cpu_var(debug_stack_usage) ||
1102		(addr <= __get_cpu_var(debug_stack_addr) &&
1103		 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
1104}
 
1105
1106static DEFINE_PER_CPU(u32, debug_stack_use_ctr);
1107
1108void debug_stack_set_zero(void)
1109{
1110	this_cpu_inc(debug_stack_use_ctr);
1111	load_idt((const struct desc_ptr *)&nmi_idt_descr);
1112}
 
1113
1114void debug_stack_reset(void)
1115{
1116	if (WARN_ON(!this_cpu_read(debug_stack_use_ctr)))
1117		return;
1118	if (this_cpu_dec_return(debug_stack_use_ctr) == 0)
1119		load_idt((const struct desc_ptr *)&idt_descr);
1120}
 
1121
1122#else	/* CONFIG_X86_64 */
1123
1124DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
1125EXPORT_PER_CPU_SYMBOL(current_task);
1126DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
1127
1128#ifdef CONFIG_CC_STACKPROTECTOR
1129DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
1130#endif
1131
1132/* Make sure %fs and %gs are initialized properly in idle threads */
1133struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
1134{
1135	memset(regs, 0, sizeof(struct pt_regs));
1136	regs->fs = __KERNEL_PERCPU;
1137	regs->gs = __KERNEL_STACK_CANARY;
1138
1139	return regs;
1140}
1141#endif	/* CONFIG_X86_64 */
1142
1143/*
1144 * Clear all 6 debug registers:
1145 */
1146static void clear_all_debug_regs(void)
1147{
1148	int i;
1149
1150	for (i = 0; i < 8; i++) {
1151		/* Ignore db4, db5 */
1152		if ((i == 4) || (i == 5))
1153			continue;
1154
1155		set_debugreg(0, i);
1156	}
1157}
1158
1159#ifdef CONFIG_KGDB
1160/*
1161 * Restore debug regs if using kgdbwait and you have a kernel debugger
1162 * connection established.
1163 */
1164static void dbg_restore_debug_regs(void)
1165{
1166	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
1167		arch_kgdb_ops.correct_hw_break();
1168}
1169#else /* ! CONFIG_KGDB */
1170#define dbg_restore_debug_regs()
1171#endif /* ! CONFIG_KGDB */
1172
1173/*
1174 * cpu_init() initializes state that is per-CPU. Some data is already
1175 * initialized (naturally) in the bootstrap process, such as the GDT
1176 * and IDT. We reload them nevertheless, this function acts as a
1177 * 'CPU state barrier', nothing should get across.
1178 * A lot of state is already set up in PDA init for 64 bit
1179 */
1180#ifdef CONFIG_X86_64
1181
1182void __cpuinit cpu_init(void)
1183{
1184	struct orig_ist *oist;
1185	struct task_struct *me;
1186	struct tss_struct *t;
1187	unsigned long v;
1188	int cpu;
1189	int i;
1190
1191	cpu = stack_smp_processor_id();
1192	t = &per_cpu(init_tss, cpu);
1193	oist = &per_cpu(orig_ist, cpu);
1194
1195#ifdef CONFIG_NUMA
1196	if (cpu != 0 && this_cpu_read(numa_node) == 0 &&
1197	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
1198		set_numa_node(early_cpu_to_node(cpu));
1199#endif
1200
1201	me = current;
1202
1203	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
1204		panic("CPU#%d already initialized!\n", cpu);
1205
1206	pr_debug("Initializing CPU#%d\n", cpu);
1207
1208	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1209
1210	/*
1211	 * Initialize the per-CPU GDT with the boot GDT,
1212	 * and set up the GDT descriptor:
1213	 */
1214
1215	switch_to_new_gdt(cpu);
1216	loadsegment(fs, 0);
1217
1218	load_idt((const struct desc_ptr *)&idt_descr);
1219
1220	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
1221	syscall_init();
1222
1223	wrmsrl(MSR_FS_BASE, 0);
1224	wrmsrl(MSR_KERNEL_GS_BASE, 0);
1225	barrier();
1226
1227	x86_configure_nx();
1228	if (cpu != 0)
1229		enable_x2apic();
1230
1231	/*
1232	 * set up and load the per-CPU TSS
1233	 */
1234	if (!oist->ist[0]) {
1235		char *estacks = per_cpu(exception_stacks, cpu);
1236
1237		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
1238			estacks += exception_stack_sizes[v];
1239			oist->ist[v] = t->x86_tss.ist[v] =
1240					(unsigned long)estacks;
1241			if (v == DEBUG_STACK-1)
1242				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
1243		}
1244	}
1245
1246	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1247
1248	/*
1249	 * <= is required because the CPU will access up to
1250	 * 8 bits beyond the end of the IO permission bitmap.
1251	 */
1252	for (i = 0; i <= IO_BITMAP_LONGS; i++)
1253		t->io_bitmap[i] = ~0UL;
1254
1255	atomic_inc(&init_mm.mm_count);
1256	me->active_mm = &init_mm;
1257	BUG_ON(me->mm);
1258	enter_lazy_tlb(&init_mm, me);
1259
1260	load_sp0(t, &current->thread);
1261	set_tss_desc(cpu, t);
1262	load_TR_desc();
1263	load_LDT(&init_mm.context);
1264
1265	clear_all_debug_regs();
1266	dbg_restore_debug_regs();
1267
1268	fpu_init();
1269	xsave_init();
1270
1271	raw_local_save_flags(kernel_eflags);
1272
1273	if (is_uv_system())
1274		uv_cpu_init();
1275}
1276
1277#else
1278
1279void __cpuinit cpu_init(void)
1280{
1281	int cpu = smp_processor_id();
1282	struct task_struct *curr = current;
1283	struct tss_struct *t = &per_cpu(init_tss, cpu);
1284	struct thread_struct *thread = &curr->thread;
1285
1286	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
1287		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
1288		for (;;)
1289			local_irq_enable();
1290	}
1291
1292	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
1293
1294	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
1295		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1296
1297	load_idt(&idt_descr);
1298	switch_to_new_gdt(cpu);
1299
1300	/*
1301	 * Set up and load the per-CPU TSS and LDT
1302	 */
1303	atomic_inc(&init_mm.mm_count);
1304	curr->active_mm = &init_mm;
1305	BUG_ON(curr->mm);
1306	enter_lazy_tlb(&init_mm, curr);
1307
1308	load_sp0(t, thread);
1309	set_tss_desc(cpu, t);
1310	load_TR_desc();
1311	load_LDT(&init_mm.context);
1312
1313	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1314
1315#ifdef CONFIG_DOUBLEFAULT
1316	/* Set up doublefault TSS pointer in the GDT */
1317	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
1318#endif
1319
1320	clear_all_debug_regs();
1321	dbg_restore_debug_regs();
1322
1323	fpu_init();
1324	xsave_init();
1325}
1326#endif
v4.6
   1#include <linux/bootmem.h>
   2#include <linux/linkage.h>
   3#include <linux/bitops.h>
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <linux/percpu.h>
   7#include <linux/string.h>
   8#include <linux/ctype.h>
   9#include <linux/delay.h>
  10#include <linux/sched.h>
  11#include <linux/init.h>
  12#include <linux/kprobes.h>
  13#include <linux/kgdb.h>
  14#include <linux/smp.h>
  15#include <linux/io.h>
  16#include <linux/syscore_ops.h>
  17
  18#include <asm/stackprotector.h>
  19#include <asm/perf_event.h>
  20#include <asm/mmu_context.h>
  21#include <asm/archrandom.h>
  22#include <asm/hypervisor.h>
  23#include <asm/processor.h>
  24#include <asm/tlbflush.h>
  25#include <asm/debugreg.h>
  26#include <asm/sections.h>
  27#include <asm/vsyscall.h>
  28#include <linux/topology.h>
  29#include <linux/cpumask.h>
  30#include <asm/pgtable.h>
  31#include <linux/atomic.h>
  32#include <asm/proto.h>
  33#include <asm/setup.h>
  34#include <asm/apic.h>
  35#include <asm/desc.h>
  36#include <asm/fpu/internal.h>
 
  37#include <asm/mtrr.h>
  38#include <linux/numa.h>
  39#include <asm/asm.h>
  40#include <asm/cpu.h>
  41#include <asm/mce.h>
  42#include <asm/msr.h>
  43#include <asm/pat.h>
  44#include <asm/microcode.h>
  45#include <asm/microcode_intel.h>
  46
  47#ifdef CONFIG_X86_LOCAL_APIC
  48#include <asm/uv/uv.h>
  49#endif
  50
  51#include "cpu.h"
  52
  53/* all of these masks are initialized in setup_cpu_local_masks() */
  54cpumask_var_t cpu_initialized_mask;
  55cpumask_var_t cpu_callout_mask;
  56cpumask_var_t cpu_callin_mask;
  57
  58/* representing cpus for which sibling maps can be computed */
  59cpumask_var_t cpu_sibling_setup_mask;
  60
  61/* correctly size the local cpu masks */
  62void __init setup_cpu_local_masks(void)
  63{
  64	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
  65	alloc_bootmem_cpumask_var(&cpu_callin_mask);
  66	alloc_bootmem_cpumask_var(&cpu_callout_mask);
  67	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
  68}
  69
  70static void default_init(struct cpuinfo_x86 *c)
  71{
  72#ifdef CONFIG_X86_64
  73	cpu_detect_cache_sizes(c);
  74#else
  75	/* Not much we can do here... */
  76	/* Check if at least it has cpuid */
  77	if (c->cpuid_level == -1) {
  78		/* No cpuid. It must be an ancient CPU */
  79		if (c->x86 == 4)
  80			strcpy(c->x86_model_id, "486");
  81		else if (c->x86 == 3)
  82			strcpy(c->x86_model_id, "386");
  83	}
  84#endif
  85}
  86
  87static const struct cpu_dev default_cpu = {
  88	.c_init		= default_init,
  89	.c_vendor	= "Unknown",
  90	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
  91};
  92
  93static const struct cpu_dev *this_cpu = &default_cpu;
  94
  95DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
  96#ifdef CONFIG_X86_64
  97	/*
  98	 * We need valid kernel segments for data and code in long mode too
  99	 * IRET will check the segment types  kkeil 2000/10/28
 100	 * Also sysret mandates a special GDT layout
 101	 *
 102	 * TLS descriptors are currently at a different place compared to i386.
 103	 * Hopefully nobody expects them at a fixed place (Wine?)
 104	 */
 105	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
 106	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
 107	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
 108	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
 109	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
 110	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
 111#else
 112	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
 113	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 114	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
 115	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
 116	/*
 117	 * Segments used for calling PnP BIOS have byte granularity.
  118	 * The code and data segments have fixed 64k limits;
  119	 * the transfer segment sizes are set at run time.
 120	 */
 121	/* 32-bit code */
 122	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
 123	/* 16-bit code */
 124	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
 125	/* 16-bit data */
 126	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
 127	/* 16-bit data */
 128	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
 129	/* 16-bit data */
 130	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
 131	/*
 132	 * The APM segments have byte granularity and their bases
 133	 * are set at run time.  All have 64k limits.
 134	 */
 135	/* 32-bit code */
 136	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
 137	/* 16-bit code */
 138	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
 139	/* data */
 140	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),
 141
 142	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 143	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 144	GDT_STACK_CANARY_INIT
 145#endif
 146} };
 147EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 148
 149static int __init x86_mpx_setup(char *s)
 150{
 151	/* require an exact match without trailing characters */
 152	if (strlen(s))
 153		return 0;
 154
 155	/* do not emit a message if the feature is not present */
 156	if (!boot_cpu_has(X86_FEATURE_MPX))
 157		return 1;
 158
 159	setup_clear_cpu_cap(X86_FEATURE_MPX);
 160	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
 161	return 1;
 162}
 163__setup("nompx", x86_mpx_setup);
 164
 165static int __init x86_noinvpcid_setup(char *s)
 166{
 167	/* noinvpcid doesn't accept parameters */
 168	if (s)
 169		return -EINVAL;
 170
 171	/* do not emit a message if the feature is not present */
 172	if (!boot_cpu_has(X86_FEATURE_INVPCID))
 173		return 0;
 174
 175	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
 176	pr_info("noinvpcid: INVPCID feature disabled\n");
 177	return 0;
 178}
 179early_param("noinvpcid", x86_noinvpcid_setup);
 180
 181#ifdef CONFIG_X86_32
 182static int cachesize_override = -1;
 183static int disable_x86_serial_nr = 1;
 184
 185static int __init cachesize_setup(char *str)
 186{
 187	get_option(&str, &cachesize_override);
 188	return 1;
 189}
 190__setup("cachesize=", cachesize_setup);
 191
 192static int __init x86_sep_setup(char *s)
 193{
 194	setup_clear_cpu_cap(X86_FEATURE_SEP);
 195	return 1;
 196}
 197__setup("nosep", x86_sep_setup);
 198
 199/* Standard macro to see if a specific flag is changeable */
 200static inline int flag_is_changeable_p(u32 flag)
 201{
 202	u32 f1, f2;
 203
 204	/*
 205	 * Cyrix and IDT cpus allow disabling of CPUID
 206	 * so the code below may return different results
 207	 * when it is executed before and after enabling
 208	 * the CPUID. Add "volatile" to not allow gcc to
 209	 * optimize the subsequent calls to this function.
 210	 */
 211	asm volatile ("pushfl		\n\t"
 212		      "pushfl		\n\t"
 213		      "popl %0		\n\t"
 214		      "movl %0, %1	\n\t"
 215		      "xorl %2, %0	\n\t"
 216		      "pushl %0		\n\t"
 217		      "popfl		\n\t"
 218		      "pushfl		\n\t"
 219		      "popl %0		\n\t"
 220		      "popfl		\n\t"
 221
 222		      : "=&r" (f1), "=&r" (f2)
 223		      : "ir" (flag));
 224
 225	return ((f1^f2) & flag) != 0;
 226}
 227
 228/* Probe for the CPUID instruction */
 229int have_cpuid_p(void)
 230{
 231	return flag_is_changeable_p(X86_EFLAGS_ID);
 232}
 233
 234static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 235{
 236	unsigned long lo, hi;
 237
 238	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
 239		return;
 240
 241	/* Disable processor serial number: */
 242
 243	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 244	lo |= 0x200000;
 245	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 246
 247	pr_notice("CPU serial number disabled.\n");
 248	clear_cpu_cap(c, X86_FEATURE_PN);
 249
 250	/* Disabling the serial number may affect the cpuid level */
 251	c->cpuid_level = cpuid_eax(0);
 252}
 253
 254static int __init x86_serial_nr_setup(char *s)
 255{
 256	disable_x86_serial_nr = 0;
 257	return 1;
 258}
 259__setup("serialnumber", x86_serial_nr_setup);
 260#else
 261static inline int flag_is_changeable_p(u32 flag)
 262{
 263	return 1;
 264}
 265static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 266{
 267}
 268#endif
 269
 
 270static __init int setup_disable_smep(char *arg)
 271{
 272	setup_clear_cpu_cap(X86_FEATURE_SMEP);
 273	return 1;
 274}
 275__setup("nosmep", setup_disable_smep);
 276
 277static __always_inline void setup_smep(struct cpuinfo_x86 *c)
 278{
 279	if (cpu_has(c, X86_FEATURE_SMEP))
 280		cr4_set_bits(X86_CR4_SMEP);
 281}
 282
 283static __init int setup_disable_smap(char *arg)
 284{
 285	setup_clear_cpu_cap(X86_FEATURE_SMAP);
 286	return 1;
 287}
 288__setup("nosmap", setup_disable_smap);
 289
 290static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 291{
 292	unsigned long eflags = native_save_fl();
 293
 294	/* This should have been cleared long ago */
 295	BUG_ON(eflags & X86_EFLAGS_AC);
 296
 297	if (cpu_has(c, X86_FEATURE_SMAP)) {
 298#ifdef CONFIG_X86_SMAP
 299		cr4_set_bits(X86_CR4_SMAP);
 300#else
 301		cr4_clear_bits(X86_CR4_SMAP);
 302#endif
 303	}
 304}
 305
 306/*
 307 * Protection Keys are not available in 32-bit mode.
 308 */
 309static bool pku_disabled;
 310
 311static __always_inline void setup_pku(struct cpuinfo_x86 *c)
 312{
 313	if (!cpu_has(c, X86_FEATURE_PKU))
 314		return;
 315	if (pku_disabled)
 316		return;
 317
 318	cr4_set_bits(X86_CR4_PKE);
 319	/*
  320	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
 321	 * cpuid bit to be set.  We need to ensure that we
 322	 * update that bit in this CPU's "cpu_info".
 323	 */
 324	get_cpu_cap(c);
 325}
 326
 327#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 328static __init int setup_disable_pku(char *arg)
 329{
 330	/*
 331	 * Do not clear the X86_FEATURE_PKU bit.  All of the
 332	 * runtime checks are against OSPKE so clearing the
 333	 * bit does nothing.
 334	 *
 335	 * This way, we will see "pku" in cpuinfo, but not
 336	 * "ospke", which is exactly what we want.  It shows
 337	 * that the CPU has PKU, but the OS has not enabled it.
 338	 * This happens to be exactly how a system would look
 339	 * if we disabled the config option.
 340	 */
 341	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
 342	pku_disabled = true;
 343	return 1;
 344}
 345__setup("nopku", setup_disable_pku);
  346#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 347
 348/*
 349 * Some CPU features depend on higher CPUID levels, which may not always
 350 * be available due to CPUID level capping or broken virtualization
 351 * software.  Add those features to this table to auto-disable them.
 352 */
 353struct cpuid_dependent_feature {
 354	u32 feature;
 355	u32 level;
 356};
 357
 358static const struct cpuid_dependent_feature
 359cpuid_dependent_features[] = {
 360	{ X86_FEATURE_MWAIT,		0x00000005 },
 361	{ X86_FEATURE_DCA,		0x00000009 },
 362	{ X86_FEATURE_XSAVE,		0x0000000d },
 363	{ 0, 0 }
 364};
 365
 366static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 367{
 368	const struct cpuid_dependent_feature *df;
 369
 370	for (df = cpuid_dependent_features; df->feature; df++) {
 371
 372		if (!cpu_has(c, df->feature))
 373			continue;
 374		/*
 375		 * Note: cpuid_level is set to -1 if unavailable, but
  376		 * extended_cpuid_level is set to 0 if unavailable
 377		 * and the legitimate extended levels are all negative
 378		 * when signed; hence the weird messing around with
 379		 * signs here...
 380		 */
 381		if (!((s32)df->level < 0 ?
 382		     (u32)df->level > (u32)c->extended_cpuid_level :
 383		     (s32)df->level > (s32)c->cpuid_level))
 384			continue;
 385
 386		clear_cpu_cap(c, df->feature);
 387		if (!warn)
 388			continue;
 389
 390		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
 391			x86_cap_flag(df->feature), df->level);
 
 392	}
 393}
 394
 395/*
 396 * Naming convention should be: <Name> [(<Codename>)]
  397 * This table is only used if init_<vendor>() below doesn't set it;
 398 * in particular, if CPUID levels 0x80000002..4 are supported, this
 399 * isn't used
 400 */
 401
 402/* Look up CPU names by table lookup. */
 403static const char *table_lookup_model(struct cpuinfo_x86 *c)
 404{
 405#ifdef CONFIG_X86_32
 406	const struct legacy_cpu_model_info *info;
 407
 408	if (c->x86_model >= 16)
 409		return NULL;	/* Range check */
 410
 411	if (!this_cpu)
 412		return NULL;
 413
 414	info = this_cpu->legacy_models;
 415
 416	while (info->family) {
 417		if (info->family == c->x86)
 418			return info->model_names[c->x86_model];
 419		info++;
 420	}
 421#endif
 422	return NULL;		/* Not found */
 423}
 424
 425__u32 cpu_caps_cleared[NCAPINTS];
 426__u32 cpu_caps_set[NCAPINTS];
 427
 428void load_percpu_segment(int cpu)
 429{
 430#ifdef CONFIG_X86_32
 431	loadsegment(fs, __KERNEL_PERCPU);
 432#else
 433	loadsegment(gs, 0);
 434	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 435#endif
 436	load_stack_canary_segment();
 437}
 438
 439/*
 440 * Current gdt points %fs at the "master" per-cpu area: after this,
 441 * it's on the real one.
 442 */
 443void switch_to_new_gdt(int cpu)
 444{
 445	struct desc_ptr gdt_descr;
 446
 447	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 448	gdt_descr.size = GDT_SIZE - 1;
 449	load_gdt(&gdt_descr);
 450	/* Reload the per-cpu base */
 451
 452	load_percpu_segment(cpu);
 453}
 454
 455static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 456
 457static void get_model_name(struct cpuinfo_x86 *c)
 458{
 459	unsigned int *v;
 460	char *p, *q, *s;
 461
 462	if (c->extended_cpuid_level < 0x80000004)
 463		return;
 464
 465	v = (unsigned int *)c->x86_model_id;
 466	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 467	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 468	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 469	c->x86_model_id[48] = 0;
 470
 471	/* Trim whitespace */
 472	p = q = s = &c->x86_model_id[0];
 473
 474	while (*p == ' ')
 475		p++;
 476
 477	while (*p) {
 478		/* Note the last non-whitespace index */
 479		if (!isspace(*p))
 480			s = q;
 481
 482		*q++ = *p++;
 483	}
 484
 485	*(s + 1) = '\0';
 486}
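A standalone run of the trimming loop above on a hypothetical, blank-padded brand string (the trimming logic is copied verbatim; only the surrounding scaffolding and the sample string are illustrative):

	#include <ctype.h>
	#include <stdio.h>

	int main(void)
	{
		char id[49] = "  Genuine Example CPU @ 2.00GHz   ";	/* hypothetical brand string */
		char *p, *q, *s;

		p = q = s = id;
		while (*p == ' ')
			p++;

		while (*p) {
			if (!isspace((unsigned char)*p))
				s = q;	/* remember where the last non-blank landed */
			*q++ = *p++;
		}
		*(s + 1) = '\0';

		printf("\"%s\"\n", id);	/* -> "Genuine Example CPU @ 2.00GHz" */
		return 0;
	}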
 487
 488void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 489{
 490	unsigned int n, dummy, ebx, ecx, edx, l2size;
 491
 492	n = c->extended_cpuid_level;
 493
 494	if (n >= 0x80000005) {
 495		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
 496		c->x86_cache_size = (ecx>>24) + (edx>>24);
 497#ifdef CONFIG_X86_64
 498		/* On K8 L1 TLB is inclusive, so don't count it */
 499		c->x86_tlbsize = 0;
 500#endif
 501	}
 502
  503	if (n < 0x80000006)	/* Some chips just have a large L1. */
 504		return;
 505
 506	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
 507	l2size = ecx >> 16;
 508
 509#ifdef CONFIG_X86_64
 510	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
 511#else
 512	/* do processor-specific cache resizing */
 513	if (this_cpu->legacy_cache_size)
 514		l2size = this_cpu->legacy_cache_size(c, l2size);
 515
 516	/* Allow user to override all this if necessary. */
 517	if (cachesize_override != -1)
 518		l2size = cachesize_override;
 519
 520	if (l2size == 0)
 521		return;		/* Again, no L2 cache is possible */
 522#endif
 523
 524	c->x86_cache_size = l2size;
 525}
 526
 527u16 __read_mostly tlb_lli_4k[NR_INFO];
 528u16 __read_mostly tlb_lli_2m[NR_INFO];
 529u16 __read_mostly tlb_lli_4m[NR_INFO];
 530u16 __read_mostly tlb_lld_4k[NR_INFO];
 531u16 __read_mostly tlb_lld_2m[NR_INFO];
 532u16 __read_mostly tlb_lld_4m[NR_INFO];
 533u16 __read_mostly tlb_lld_1g[NR_INFO];
 534
 535static void cpu_detect_tlb(struct cpuinfo_x86 *c)
 536{
 537	if (this_cpu->c_detect_tlb)
 538		this_cpu->c_detect_tlb(c);
 539
 540	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
 541		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
 542		tlb_lli_4m[ENTRIES]);
 543
 544	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
 545		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
 546		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 547}
 548
 549void detect_ht(struct cpuinfo_x86 *c)
 550{
 551#ifdef CONFIG_SMP
 552	u32 eax, ebx, ecx, edx;
 553	int index_msb, core_bits;
 554	static bool printed;
 555
 556	if (!cpu_has(c, X86_FEATURE_HT))
 557		return;
 558
 559	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 560		goto out;
 561
 562	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
 563		return;
 564
 565	cpuid(1, &eax, &ebx, &ecx, &edx);
 566
 567	smp_num_siblings = (ebx & 0xff0000) >> 16;
 568
 569	if (smp_num_siblings == 1) {
 570		pr_info_once("CPU0: Hyper-Threading is disabled\n");
 571		goto out;
 572	}
 573
 574	if (smp_num_siblings <= 1)
 575		goto out;
 576
 577	index_msb = get_count_order(smp_num_siblings);
 578	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 579
 580	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 581
 582	index_msb = get_count_order(smp_num_siblings);
 583
 584	core_bits = get_count_order(c->x86_max_cores);
 585
 586	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 587				       ((1 << core_bits) - 1);
 588
 589out:
 590	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
 591		pr_info("CPU: Physical Processor ID: %d\n",
 592			c->phys_proc_id);
 593		pr_info("CPU: Processor Core ID: %d\n",
 594			c->cpu_core_id);
 595		printed = 1;
 596	}
 597#endif
 598}
 599
 600static void get_cpu_vendor(struct cpuinfo_x86 *c)
 601{
 602	char *v = c->x86_vendor_id;
 603	int i;
 604
 605	for (i = 0; i < X86_VENDOR_NUM; i++) {
 606		if (!cpu_devs[i])
 607			break;
 608
 609		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 610		    (cpu_devs[i]->c_ident[1] &&
 611		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 612
 613			this_cpu = cpu_devs[i];
 614			c->x86_vendor = this_cpu->c_x86_vendor;
 615			return;
 616		}
 617	}
 618
 619	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
 620		    "CPU: Your system may be unstable.\n", v);
 
 621
 622	c->x86_vendor = X86_VENDOR_UNKNOWN;
 623	this_cpu = &default_cpu;
 624}
 625
 626void cpu_detect(struct cpuinfo_x86 *c)
 627{
 628	/* Get vendor name */
 629	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
 630	      (unsigned int *)&c->x86_vendor_id[0],
 631	      (unsigned int *)&c->x86_vendor_id[8],
 632	      (unsigned int *)&c->x86_vendor_id[4]);
 633
 634	c->x86 = 4;
 635	/* Intel-defined flags: level 0x00000001 */
 636	if (c->cpuid_level >= 0x00000001) {
 637		u32 junk, tfms, cap0, misc;
 638
 639		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 640		c->x86		= x86_family(tfms);
 641		c->x86_model	= x86_model(tfms);
 642		c->x86_mask	= x86_stepping(tfms);
 643
 644		if (cap0 & (1<<19)) {
 645			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 646			c->x86_cache_alignment = c->x86_clflush_size;
 647		}
 648	}
 649}
 650
 651void get_cpu_cap(struct cpuinfo_x86 *c)
 652{
 653	u32 eax, ebx, ecx, edx;
 654
 655	/* Intel-defined flags: level 0x00000001 */
 656	if (c->cpuid_level >= 0x00000001) {
 657		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
 658
 659		c->x86_capability[CPUID_1_ECX] = ecx;
 660		c->x86_capability[CPUID_1_EDX] = edx;
 661	}
 662
 663	/* Additional Intel-defined flags: level 0x00000007 */
 664	if (c->cpuid_level >= 0x00000007) {
 665		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
 666
 667		c->x86_capability[CPUID_7_0_EBX] = ebx;
 668
 669		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
 670		c->x86_capability[CPUID_7_ECX] = ecx;
 671	}
 672
 673	/* Extended state features: level 0x0000000d */
 674	if (c->cpuid_level >= 0x0000000d) {
 675		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
 676
 677		c->x86_capability[CPUID_D_1_EAX] = eax;
 678	}
 679
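	/*
	 * Leaf 0xF enumerates cache QoS monitoring (CQM): sub-leaf 0
	 * reports the supported resource types and the widest RMID range,
	 * while sub-leaf 1 reports the L3 occupancy upscaling factor in
	 * EBX and that resource's maximum RMID in ECX, which is what the
	 * block below stores in x86_cache_occ_scale and x86_cache_max_rmid.
	 */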
 680	/* Additional Intel-defined flags: level 0x0000000F */
 681	if (c->cpuid_level >= 0x0000000F) {
 682
 683		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
 684		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
 685		c->x86_capability[CPUID_F_0_EDX] = edx;
 686
 687		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
 688			/* will be overridden if occupancy monitoring exists */
 689			c->x86_cache_max_rmid = ebx;
 690
 691			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
 692			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
 693			c->x86_capability[CPUID_F_1_EDX] = edx;
 694
 695			if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
 696			      ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
 697			       (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
 698				c->x86_cache_max_rmid = ecx;
 699				c->x86_cache_occ_scale = ebx;
 700			}
 701		} else {
 702			c->x86_cache_max_rmid = -1;
 703			c->x86_cache_occ_scale = -1;
 704		}
 705	}
 706
 707	/* AMD-defined flags: level 0x80000001 */
 708	eax = cpuid_eax(0x80000000);
 709	c->extended_cpuid_level = eax;
 710
 711	if ((eax & 0xffff0000) == 0x80000000) {
 712		if (eax >= 0x80000001) {
 713			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
 714
 715			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
 716			c->x86_capability[CPUID_8000_0001_EDX] = edx;
 717		}
 718	}
 719
 720	if (c->extended_cpuid_level >= 0x80000008) {
 721		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
 722
 723		c->x86_virt_bits = (eax >> 8) & 0xff;
 724		c->x86_phys_bits = eax & 0xff;
 725		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
 726	}
 727#ifdef CONFIG_X86_32
 728	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
 729		c->x86_phys_bits = 36;
 730#endif
 731
 732	if (c->extended_cpuid_level >= 0x80000007)
 733		c->x86_power = cpuid_edx(0x80000007);
 734
 735	if (c->extended_cpuid_level >= 0x8000000a)
 736		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 737
 738	init_scattered_cpuid_features(c);
 739}
 740
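/*
 * Fallback identification for CPUs without CPUID (386/486 era).  The
 * flag_is_changeable_p() test used below relies on the fact that a 386
 * cannot toggle EFLAGS.AC; a minimal sketch of the idea (the real helper
 * lives earlier in this file and uses inline asm) is roughly:
 *
 *	read EFLAGS, flip the bit, write it back, read EFLAGS again and
 *	check whether the bit actually changed.
 */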
 741static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 742{
 743#ifdef CONFIG_X86_32
 744	int i;
 745
 746	/*
 747	 * First of all, decide if this is a 486 or higher
 748	 * It's a 486 if we can modify the AC flag
 749	 */
 750	if (flag_is_changeable_p(X86_EFLAGS_AC))
 751		c->x86 = 4;
 752	else
 753		c->x86 = 3;
 754
 755	for (i = 0; i < X86_VENDOR_NUM; i++)
 756		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
 757			c->x86_vendor_id[0] = 0;
 758			cpu_devs[i]->c_identify(c);
 759			if (c->x86_vendor_id[0]) {
 760				get_cpu_vendor(c);
 761				break;
 762			}
 763		}
 764#endif
 765}
 766
 767/*
 768 * Do minimum CPU detection early.
 769 * Fields really needed: vendor, cpuid_level, family, model, mask,
 770 * cache alignment.
 771 * The others are not touched to avoid unwanted side effects.
 772 *
 773 * WARNING: this function is only called on the BP.  Don't add code here
 774 * that is supposed to run on all CPUs.
 775 */
 776static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 777{
 778#ifdef CONFIG_X86_64
 779	c->x86_clflush_size = 64;
 780	c->x86_phys_bits = 36;
 781	c->x86_virt_bits = 48;
 782#else
 783	c->x86_clflush_size = 32;
 784	c->x86_phys_bits = 32;
 785	c->x86_virt_bits = 32;
 786#endif
 787	c->x86_cache_alignment = c->x86_clflush_size;
 788
 789	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 790	c->extended_cpuid_level = 0;
 791
 792	if (!have_cpuid_p())
 793		identify_cpu_without_cpuid(c);
 794
 795	/* Cyrix could have CPUID enabled via c_identify() */
 796	if (!have_cpuid_p())
 797		return;
 798
 799	cpu_detect(c);
 800	get_cpu_vendor(c);
 801	get_cpu_cap(c);
 802
 803	if (this_cpu->c_early_init)
 804		this_cpu->c_early_init(c);
 805
 806	c->cpu_index = 0;
 807	filter_cpuid_features(c, false);
 808
 809	if (this_cpu->c_bsp_init)
 810		this_cpu->c_bsp_init(c);
 811
 812	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 813	fpu__init_system(c);
 814}
 815
 816void __init early_cpu_init(void)
 817{
 818	const struct cpu_dev *const *cdev;
 819	int count = 0;
 820
 821#ifdef CONFIG_PROCESSOR_SELECT
 822	pr_info("KERNEL supported cpus:\n");
 823#endif
 824
 825	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
 826		const struct cpu_dev *cpudev = *cdev;
 827
 828		if (count >= X86_VENDOR_NUM)
 829			break;
 830		cpu_devs[count] = cpudev;
 831		count++;
 832
 833#ifdef CONFIG_PROCESSOR_SELECT
 834		{
 835			unsigned int j;
 836
 837			for (j = 0; j < 2; j++) {
 838				if (!cpudev->c_ident[j])
 839					continue;
 840				pr_info("  %s %s\n", cpudev->c_vendor,
 841					cpudev->c_ident[j]);
 842			}
 843		}
 844#endif
 845	}
 846	early_identify_cpu(&boot_cpu_data);
 847}
 848
 849/*
 850 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 851 * unfortunately, that's not true in practice because of early VIA
 852 * chips and (more importantly) broken virtualizers that are not easy
 853 * to detect. In the latter case it doesn't even *fail* reliably, so
 854 * probing for it doesn't even work. Disable it completely on 32-bit
 855 * unless we can find a reliable way to detect all the broken cases.
 856 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 857 */
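/*
 * (NOPL is the multi-byte 0F 1F /0 NOP encoding; this flag records
 * whether the kernel may rely on it, e.g. when choosing NOP padding.)
 */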
 858static void detect_nopl(struct cpuinfo_x86 *c)
 859{
 860#ifdef CONFIG_X86_32
 861	clear_cpu_cap(c, X86_FEATURE_NOPL);
 862#else
 863	set_cpu_cap(c, X86_FEATURE_NOPL);
 864#endif
 865
 866	/*
 867	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
 868	 * systems that run Linux at CPL > 0 may or may not have the
 869	 * issue, but, even if they have the issue, there's absolutely
 870	 * nothing we can do about it because we can't use the real IRET
 871	 * instruction.
 872	 *
 873	 * NB: For the time being, only 32-bit kernels support
 874	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
 875	 * whether to apply espfix using paravirt hooks.  If any
 876	 * non-paravirt system ever shows up that does *not* have the
 877	 * ESPFIX issue, we can change this.
 878	 */
 879#ifdef CONFIG_X86_32
 880#ifdef CONFIG_PARAVIRT
 881	do {
 882		extern void native_iret(void);
 883		if (pv_cpu_ops.iret == native_iret)
 884			set_cpu_bug(c, X86_BUG_ESPFIX);
 885	} while (0);
 886#else
 887	set_cpu_bug(c, X86_BUG_ESPFIX);
 888#endif
 889#endif
 890}
 891
 892static void generic_identify(struct cpuinfo_x86 *c)
 893{
 894	c->extended_cpuid_level = 0;
 895
 896	if (!have_cpuid_p())
 897		identify_cpu_without_cpuid(c);
 898
 899	/* Cyrix could have CPUID enabled via c_identify() */
 900	if (!have_cpuid_p())
 901		return;
 902
 903	cpu_detect(c);
 904
 905	get_cpu_vendor(c);
 906
 907	get_cpu_cap(c);
 908
 909	if (c->cpuid_level >= 0x00000001) {
 910		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 911#ifdef CONFIG_X86_32
 912# ifdef CONFIG_SMP
 913		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 914# else
 915		c->apicid = c->initial_apicid;
 916# endif
 917#endif
 918		c->phys_proc_id = c->initial_apicid;
 919	}
 920
 921	get_model_name(c); /* Default name */
 922
 923	detect_nopl(c);
 924}
 925
 926static void x86_init_cache_qos(struct cpuinfo_x86 *c)
 927{
 928	/*
 929	 * The heavy lifting of max_rmid and cache_occ_scale is handled
 930	 * in get_cpu_cap().  Here we only clamp boot_cpu_data's max_rmid
 931	 * to the minimum seen so far, in case CQM isn't present on every CPU.
 932	 */
 933	if (c != &boot_cpu_data) {
 934		boot_cpu_data.x86_cache_max_rmid =
 935			min(boot_cpu_data.x86_cache_max_rmid,
 936			    c->x86_cache_max_rmid);
 937	}
 938}
 939
 940/*
 941 * This does the hard work of actually picking apart the CPU stuff...
 942 */
 943static void identify_cpu(struct cpuinfo_x86 *c)
 944{
 945	int i;
 946
 947	c->loops_per_jiffy = loops_per_jiffy;
 948	c->x86_cache_size = -1;
 949	c->x86_vendor = X86_VENDOR_UNKNOWN;
 950	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
 951	c->x86_vendor_id[0] = '\0'; /* Unset */
 952	c->x86_model_id[0] = '\0';  /* Unset */
 953	c->x86_max_cores = 1;
 954	c->x86_coreid_bits = 0;
 955#ifdef CONFIG_X86_64
 956	c->x86_clflush_size = 64;
 957	c->x86_phys_bits = 36;
 958	c->x86_virt_bits = 48;
 959#else
 960	c->cpuid_level = -1;	/* CPUID not detected */
 961	c->x86_clflush_size = 32;
 962	c->x86_phys_bits = 32;
 963	c->x86_virt_bits = 32;
 964#endif
 965	c->x86_cache_alignment = c->x86_clflush_size;
 966	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 967
 968	generic_identify(c);
 969
 970	if (this_cpu->c_identify)
 971		this_cpu->c_identify(c);
 972
 973	/* Clear/Set all flags overridden by options, after probe */
 974	for (i = 0; i < NCAPINTS; i++) {
 975		c->x86_capability[i] &= ~cpu_caps_cleared[i];
 976		c->x86_capability[i] |= cpu_caps_set[i];
 977	}
 978
 979#ifdef CONFIG_X86_64
 980	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 981#endif
 982
 983	/*
 984	 * Vendor-specific initialization.  In this section we
 985	 * canonicalize the feature flags, meaning if there are
 986	 * features a certain CPU supports which CPUID doesn't
 987	 * tell us, CPUID claiming incorrect flags, or other bugs,
 988	 * we handle them here.
 989	 *
 990	 * At the end of this section, c->x86_capability better
 991	 * indicate the features this CPU genuinely supports!
 992	 */
 993	if (this_cpu->c_init)
 994		this_cpu->c_init(c);
 995
 996	/* Disable the PN if appropriate */
 997	squash_the_stupid_serial_number(c);
 998
 999	/* Set up SMEP/SMAP */
1000	setup_smep(c);
1001	setup_smap(c);
1002
1003	/*
1004	 * The vendor-specific functions might have changed features.
1005	 * Now we do "generic changes."
1006	 */
1007
1008	/* Filter out anything that depends on CPUID levels we don't have */
1009	filter_cpuid_features(c, true);
1010
1011	/* If the model name is still unset, do table lookup. */
1012	if (!c->x86_model_id[0]) {
1013		const char *p;
1014		p = table_lookup_model(c);
1015		if (p)
1016			strcpy(c->x86_model_id, p);
1017		else
1018			/* Last resort... */
1019			sprintf(c->x86_model_id, "%02x/%02x",
1020				c->x86, c->x86_model);
1021	}
1022
1023#ifdef CONFIG_X86_64
1024	detect_ht(c);
1025#endif
1026
1027	init_hypervisor(c);
1028	x86_init_rdrand(c);
1029	x86_init_cache_qos(c);
1030	setup_pku(c);
1031
1032	/*
1033	 * Clear/Set all flags overridden by options; this needs to be done
1034	 * before the SMP capability AND across all CPUs below.
1035	 */
1036	for (i = 0; i < NCAPINTS; i++) {
1037		c->x86_capability[i] &= ~cpu_caps_cleared[i];
1038		c->x86_capability[i] |= cpu_caps_set[i];
1039	}
1040
1041	/*
1042	 * On SMP, boot_cpu_data holds the common feature set between
1043	 * all CPUs; so make sure that we indicate which features are
1044	 * common between the CPUs.  The first time this routine gets
1045	 * executed, c == &boot_cpu_data.
1046	 */
1047	if (c != &boot_cpu_data) {
1048		/* AND the already accumulated flags with these */
1049		for (i = 0; i < NCAPINTS; i++)
1050			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1051
1052		/* OR, i.e. replicate the bug flags */
1053		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
1054			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1055	}
1056
1057	/* Init Machine Check Exception if available. */
1058	mcheck_cpu_init(c);
1059
1060	select_idle_routine(c);
1061
1062#ifdef CONFIG_NUMA
1063	numa_add_cpu(smp_processor_id());
1064#endif
1065	/* The boot/hotplug time assignment got cleared, restore it */
1066	c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
1067}
1068
1069/*
1070 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
1071 * on 32-bit kernels:
1072 */
1073#ifdef CONFIG_X86_32
1074void enable_sep_cpu(void)
1075{
1076	struct tss_struct *tss;
1077	int cpu;
1078
1079	cpu = get_cpu();
1080	tss = &per_cpu(cpu_tss, cpu);
1081
1082	if (!boot_cpu_has(X86_FEATURE_SEP))
1083		goto out;
1084
1085	/*
1086	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1087	 * see the big comment in struct x86_hw_tss's definition.
1088	 */
1089
1090	tss->x86_tss.ss1 = __KERNEL_CS;
1091	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
1092
1093	wrmsr(MSR_IA32_SYSENTER_ESP,
1094	      (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
1095	      0);
1096
1097	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
1098
1099out:
1100	put_cpu();
1101}
1102#endif
1103
1104void __init identify_boot_cpu(void)
1105{
1106	identify_cpu(&boot_cpu_data);
1107	init_amd_e400_c1e_mask();
1108#ifdef CONFIG_X86_32
1109	sysenter_setup();
1110	enable_sep_cpu();
1111#endif
1112	cpu_detect_tlb(&boot_cpu_data);
1113}
1114
1115void identify_secondary_cpu(struct cpuinfo_x86 *c)
1116{
1117	BUG_ON(c == &boot_cpu_data);
1118	identify_cpu(c);
1119#ifdef CONFIG_X86_32
1120	enable_sep_cpu();
1121#endif
1122	mtrr_ap_init();
1123}
1124
1125struct msr_range {
1126	unsigned	min;
1127	unsigned	max;
1128};
1129
1130static const struct msr_range msr_range_array[] = {
1131	{ 0x00000000, 0x00000418},
1132	{ 0xc0000000, 0xc000040b},
1133	{ 0xc0010000, 0xc0010142},
1134	{ 0xc0011000, 0xc001103b},
1135};
1136
1137static void __print_cpu_msr(void)
1138{
1139	unsigned index_min, index_max;
1140	unsigned index;
1141	u64 val;
1142	int i;
1143
1144	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
1145		index_min = msr_range_array[i].min;
1146		index_max = msr_range_array[i].max;
1147
1148		for (index = index_min; index < index_max; index++) {
1149			if (rdmsrl_safe(index, &val))
1150				continue;
1151			pr_info(" MSR%08x: %016llx\n", index, val);
1152		}
1153	}
1154}
1155
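/*
 * "show_msr=N" on the kernel command line dumps the MSR ranges above for
 * the first N CPUs (print_cpu_msr() only prints when cpu_index < show_msr).
 */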
1156static int show_msr;
1157
1158static __init int setup_show_msr(char *arg)
1159{
1160	int num;
1161
1162	get_option(&arg, &num);
1163
1164	if (num > 0)
1165		show_msr = num;
1166	return 1;
1167}
1168__setup("show_msr=", setup_show_msr);
1169
1170static __init int setup_noclflush(char *arg)
1171{
1172	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
1173	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
1174	return 1;
1175}
1176__setup("noclflush", setup_noclflush);
1177
1178void print_cpu_info(struct cpuinfo_x86 *c)
1179{
1180	const char *vendor = NULL;
1181
1182	if (c->x86_vendor < X86_VENDOR_NUM) {
1183		vendor = this_cpu->c_vendor;
1184	} else {
1185		if (c->cpuid_level >= 0)
1186			vendor = c->x86_vendor_id;
1187	}
1188
1189	if (vendor && !strstr(c->x86_model_id, vendor))
1190		pr_cont("%s ", vendor);
1191
1192	if (c->x86_model_id[0])
1193		pr_cont("%s", c->x86_model_id);
1194	else
1195		pr_cont("%d86", c->x86);
1196
1197	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
1198
1199	if (c->x86_mask || c->cpuid_level >= 0)
1200		pr_cont(", stepping: 0x%x)\n", c->x86_mask);
1201	else
1202		pr_cont(")\n");
1203
1204	print_cpu_msr(c);
1205}
1206
1207void print_cpu_msr(struct cpuinfo_x86 *c)
1208{
1209	if (c->cpu_index < show_msr)
1210		__print_cpu_msr();
1211}
1212
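/*
 * "clearcpuid=N" force-clears a single feature bit, where N follows the
 * X86_FEATURE_* numbering (capability word * 32 + bit within the word).
 * For example, bit 2 of word 0 corresponds to CPUID.1:EDX bit 2 (DE).
 */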
1213static __init int setup_disablecpuid(char *arg)
1214{
1215	int bit;
1216
1217	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
1218		setup_clear_cpu_cap(bit);
1219	else
1220		return 0;
1221
1222	return 1;
1223}
1224__setup("clearcpuid=", setup_disablecpuid);
1225
1226#ifdef CONFIG_X86_64
1227struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
1228struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
1229				    (unsigned long) debug_idt_table };
1230
1231DEFINE_PER_CPU_FIRST(union irq_stack_union,
1232		     irq_stack_union) __aligned(PAGE_SIZE) __visible;
1233
1234/*
1235 * The following percpu variables are hot.  Align current_task to
1236 * cacheline size such that they fall in the same cacheline.
1237 */
1238DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
1239	&init_task;
1240EXPORT_PER_CPU_SYMBOL(current_task);
1241
1242DEFINE_PER_CPU(char *, irq_stack_ptr) =
1243	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
1244
1245DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
1246
1247DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1248EXPORT_PER_CPU_SYMBOL(__preempt_count);
1249
1250/*
1251 * Special IST stacks which the CPU switches to when it calls
1252 * an IST-marked descriptor entry. Up to 7 stacks (hardware
1253 * limit), all of them are 4K, except the debug stack which
1254 * is 8K.
1255 */
1256static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
1257	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
1258	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
1259};
1260
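/*
 * The backing storage below is sized as the sum of the individual stack
 * sizes: (N_EXCEPTION_STACKS - 1) ordinary EXCEPTION_STKSZ stacks plus
 * one larger DEBUG_STKSZ debug stack, matching exception_stack_sizes[].
 */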
1261static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
1262	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
1263
1264/* May not be marked __init: used by software suspend */
1265void syscall_init(void)
1266{
1267	/*
1268	 * LSTAR and STAR live in a bit strange symbiosis.
1269	 * They both write to the same internal register. STAR allows
1270	 * setting CS/DS, but only a 32-bit target; LSTAR sets the 64-bit rip.
1271	 */
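	/*
	 * MSR_STAR layout: bits 47:32 hold the kernel CS used on SYSCALL
	 * entry (the CPU derives SS from it), bits 63:48 hold the base
	 * selector used to build user CS/SS on SYSRET.  Hence the high
	 * word written below is (__USER32_CS << 16) | __KERNEL_CS, and
	 * the low 32 bits (the legacy SYSCALL EIP) are left zero.
	 */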
1272	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
1273	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
1274
1275#ifdef CONFIG_IA32_EMULATION
1276	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
1277	/*
1278	 * This only works on Intel CPUs.
1279	 * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
1280	 * This does not cause SYSENTER to jump to the wrong location, because
1281	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
1282	 */
1283	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
1284	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
1285	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
1286#else
1287	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
1288	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
1289	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
1290	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
1291#endif
1292
1293	/* Flags to clear on syscall */
1294	wrmsrl(MSR_SYSCALL_MASK,
1295	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
1296	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
1297}
1298
1299/*
1300 * Copies of the original ist values from the tss are only accessed during
1301 * debugging, no special alignment required.
1302 */
1303DEFINE_PER_CPU(struct orig_ist, orig_ist);
1304
1305static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
1306DEFINE_PER_CPU(int, debug_stack_usage);
1307
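/*
 * is_debug_stack() reports whether the CPU is currently operating on its
 * per-CPU debug (IST) stack: either debug_stack_usage has been bumped, or
 * the address falls within the DEBUG_STKSZ region that ends at
 * debug_stack_addr (recorded in cpu_init() below).
 */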
1308int is_debug_stack(unsigned long addr)
1309{
1310	return __this_cpu_read(debug_stack_usage) ||
1311		(addr <= __this_cpu_read(debug_stack_addr) &&
1312		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
1313}
1314NOKPROBE_SYMBOL(is_debug_stack);
1315
1316DEFINE_PER_CPU(u32, debug_idt_ctr);
1317
1318void debug_stack_set_zero(void)
1319{
1320	this_cpu_inc(debug_idt_ctr);
1321	load_current_idt();
1322}
1323NOKPROBE_SYMBOL(debug_stack_set_zero);
1324
1325void debug_stack_reset(void)
1326{
1327	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
1328		return;
1329	if (this_cpu_dec_return(debug_idt_ctr) == 0)
1330		load_current_idt();
1331}
1332NOKPROBE_SYMBOL(debug_stack_reset);
1333
1334#else	/* CONFIG_X86_64 */
1335
1336DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
1337EXPORT_PER_CPU_SYMBOL(current_task);
1338DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1339EXPORT_PER_CPU_SYMBOL(__preempt_count);
1340
1341/*
1342 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
1343 * the top of the kernel stack.  Use an extra percpu variable to track the
1344 * top of the kernel stack directly.
1345 */
1346DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
1347	(unsigned long)&init_thread_union + THREAD_SIZE;
1348EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
1349
1350#ifdef CONFIG_CC_STACKPROTECTOR
1351DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
1352#endif
1353
1354#endif	/* CONFIG_X86_64 */
1355
1356/*
1357 * Clear all 6 debug registers:
1358 */
1359static void clear_all_debug_regs(void)
1360{
1361	int i;
1362
1363	for (i = 0; i < 8; i++) {
1364		/* Ignore db4, db5 */
1365		if ((i == 4) || (i == 5))
1366			continue;
1367
1368		set_debugreg(0, i);
1369	}
1370}
1371
1372#ifdef CONFIG_KGDB
1373/*
1374	 * Restore debug regs when booted with kgdbwait and a kernel debugger
1375	 * connection has been established.
1376 */
1377static void dbg_restore_debug_regs(void)
1378{
1379	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
1380		arch_kgdb_ops.correct_hw_break();
1381}
1382#else /* ! CONFIG_KGDB */
1383#define dbg_restore_debug_regs()
1384#endif /* ! CONFIG_KGDB */
1385
1386static void wait_for_master_cpu(int cpu)
1387{
1388#ifdef CONFIG_SMP
1389	/*
1390	 * wait for ACK from master CPU before continuing
1391	 * with AP initialization
1392	 */
1393	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
1394	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
1395		cpu_relax();
1396#endif
1397}
1398
1399/*
1400 * cpu_init() initializes state that is per-CPU. Some data is already
1401 * initialized (naturally) in the bootstrap process, such as the GDT
1402	 * and IDT. We reload them nevertheless; this function acts as a
1403	 * 'CPU state barrier', and nothing should get across.
1404	 * A lot of state is already set up in PDA init for 64-bit.
1405 */
1406#ifdef CONFIG_X86_64
1407
1408void cpu_init(void)
1409{
1410	struct orig_ist *oist;
1411	struct task_struct *me;
1412	struct tss_struct *t;
1413	unsigned long v;
1414	int cpu = stack_smp_processor_id();
1415	int i;
1416
1417	wait_for_master_cpu(cpu);
1418
1419	/*
1420	 * Initialize the CR4 shadow before doing anything that could
1421	 * try to read it.
1422	 */
1423	cr4_init_shadow();
1424
1425	/*
1426	 * Load microcode on this CPU if a valid microcode image is available.
1427	 * This is the early microcode loading procedure.
1428	 */
1429	load_ucode_ap();
1430
1431	t = &per_cpu(cpu_tss, cpu);
1432	oist = &per_cpu(orig_ist, cpu);
1433
1434#ifdef CONFIG_NUMA
1435	if (this_cpu_read(numa_node) == 0 &&
1436	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
1437		set_numa_node(early_cpu_to_node(cpu));
1438#endif
1439
1440	me = current;
1441
1442	pr_debug("Initializing CPU#%d\n", cpu);
1443
1444	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1445
1446	/*
1447	 * Initialize the per-CPU GDT with the boot GDT,
1448	 * and set up the GDT descriptor:
1449	 */
1450
1451	switch_to_new_gdt(cpu);
1452	loadsegment(fs, 0);
1453
1454	load_current_idt();
1455
1456	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
1457	syscall_init();
1458
1459	wrmsrl(MSR_FS_BASE, 0);
1460	wrmsrl(MSR_KERNEL_GS_BASE, 0);
1461	barrier();
1462
1463	x86_configure_nx();
1464	x2apic_setup();
1465
1466	/*
1467	 * set up and load the per-CPU TSS
1468	 */
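	/*
	 * Carve the per-CPU exception_stacks area into the individual IST
	 * stacks.  estacks is advanced by each stack's size *before* being
	 * recorded, so every ist[] entry points at the top of its stack
	 * (IST entries must hold the stack top since stacks grow down).
	 * The top of the debug stack is also remembered for
	 * is_debug_stack().
	 */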
1469	if (!oist->ist[0]) {
1470		char *estacks = per_cpu(exception_stacks, cpu);
1471
1472		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
1473			estacks += exception_stack_sizes[v];
1474			oist->ist[v] = t->x86_tss.ist[v] =
1475					(unsigned long)estacks;
1476			if (v == DEBUG_STACK-1)
1477				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
1478		}
1479	}
1480
1481	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1482
1483	/*
1484	 * <= is required because the CPU will access up to
1485	 * 8 bits beyond the end of the IO permission bitmap.
1486	 */
1487	for (i = 0; i <= IO_BITMAP_LONGS; i++)
1488		t->io_bitmap[i] = ~0UL;
1489
1490	atomic_inc(&init_mm.mm_count);
1491	me->active_mm = &init_mm;
1492	BUG_ON(me->mm);
1493	enter_lazy_tlb(&init_mm, me);
1494
1495	load_sp0(t, &current->thread);
1496	set_tss_desc(cpu, t);
1497	load_TR_desc();
1498	load_mm_ldt(&init_mm);
1499
1500	clear_all_debug_regs();
1501	dbg_restore_debug_regs();
1502
1503	fpu__init_cpu();
1504
1505	if (is_uv_system())
1506		uv_cpu_init();
1507}
1508
1509#else
1510
1511void cpu_init(void)
1512{
1513	int cpu = smp_processor_id();
1514	struct task_struct *curr = current;
1515	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
1516	struct thread_struct *thread = &curr->thread;
1517
1518	wait_for_master_cpu(cpu);
1519
1520	/*
1521	 * Initialize the CR4 shadow before doing anything that could
1522	 * try to read it.
1523	 */
1524	cr4_init_shadow();
1525
1526	show_ucode_info_early();
1527
1528	pr_info("Initializing CPU#%d\n", cpu);
1529
1530	if (cpu_feature_enabled(X86_FEATURE_VME) ||
1531	    cpu_has_tsc ||
1532	    boot_cpu_has(X86_FEATURE_DE))
1533		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1534
1535	load_current_idt();
1536	switch_to_new_gdt(cpu);
1537
1538	/*
1539	 * Set up and load the per-CPU TSS and LDT
1540	 */
1541	atomic_inc(&init_mm.mm_count);
1542	curr->active_mm = &init_mm;
1543	BUG_ON(curr->mm);
1544	enter_lazy_tlb(&init_mm, curr);
1545
1546	load_sp0(t, thread);
1547	set_tss_desc(cpu, t);
1548	load_TR_desc();
1549	load_mm_ldt(&init_mm);
1550
1551	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1552
1553#ifdef CONFIG_DOUBLEFAULT
1554	/* Set up doublefault TSS pointer in the GDT */
1555	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
1556#endif
1557
1558	clear_all_debug_regs();
1559	dbg_restore_debug_regs();
1560
1561	fpu__init_cpu();
1562}
1563#endif
1564
1565static void bsp_resume(void)
1566{
1567	if (this_cpu->c_bsp_resume)
1568		this_cpu->c_bsp_resume(&boot_cpu_data);
1569}
1570
1571static struct syscore_ops cpu_syscore_ops = {
1572	.resume		= bsp_resume,
1573};
1574
1575static int __init init_cpu_syscore(void)
1576{
1577	register_syscore_ops(&cpu_syscore_ops);
1578	return 0;
1579}
1580core_initcall(init_cpu_syscore);