v4.6
   1#include <linux/bootmem.h>
   2#include <linux/linkage.h>
   3#include <linux/bitops.h>
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <linux/percpu.h>
   7#include <linux/string.h>
   8#include <linux/ctype.h>
   9#include <linux/delay.h>
  10#include <linux/sched.h>
  11#include <linux/init.h>
  12#include <linux/kprobes.h>
  13#include <linux/kgdb.h>
  14#include <linux/smp.h>
  15#include <linux/io.h>
  16#include <linux/syscore_ops.h>
  17
  18#include <asm/stackprotector.h>
  19#include <asm/perf_event.h>
  20#include <asm/mmu_context.h>
  21#include <asm/archrandom.h>
  22#include <asm/hypervisor.h>
  23#include <asm/processor.h>
  24#include <asm/tlbflush.h>
  25#include <asm/debugreg.h>
  26#include <asm/sections.h>
  27#include <asm/vsyscall.h>
  28#include <linux/topology.h>
  29#include <linux/cpumask.h>
  30#include <asm/pgtable.h>
  31#include <linux/atomic.h>
  32#include <asm/proto.h>
  33#include <asm/setup.h>
  34#include <asm/apic.h>
  35#include <asm/desc.h>
  36#include <asm/fpu/internal.h>
  37#include <asm/mtrr.h>
  38#include <linux/numa.h>
  39#include <asm/asm.h>
  40#include <asm/cpu.h>
  41#include <asm/mce.h>
  42#include <asm/msr.h>
  43#include <asm/pat.h>
  44#include <asm/microcode.h>
  45#include <asm/microcode_intel.h>
  46
  47#ifdef CONFIG_X86_LOCAL_APIC
  48#include <asm/uv/uv.h>
  49#endif
  50
  51#include "cpu.h"
  52
  53/* all of these masks are initialized in setup_cpu_local_masks() */
  54cpumask_var_t cpu_initialized_mask;
  55cpumask_var_t cpu_callout_mask;
  56cpumask_var_t cpu_callin_mask;
  57
  58/* representing cpus for which sibling maps can be computed */
  59cpumask_var_t cpu_sibling_setup_mask;
  60
  61/* correctly size the local cpu masks */
  62void __init setup_cpu_local_masks(void)
  63{
  64	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
  65	alloc_bootmem_cpumask_var(&cpu_callin_mask);
  66	alloc_bootmem_cpumask_var(&cpu_callout_mask);
  67	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
  68}
  69
  70static void default_init(struct cpuinfo_x86 *c)
  71{
  72#ifdef CONFIG_X86_64
  73	cpu_detect_cache_sizes(c);
  74#else
  75	/* Not much we can do here... */
  76	/* Check if it at least has cpuid */
  77	if (c->cpuid_level == -1) {
  78		/* No cpuid. It must be an ancient CPU */
  79		if (c->x86 == 4)
  80			strcpy(c->x86_model_id, "486");
  81		else if (c->x86 == 3)
  82			strcpy(c->x86_model_id, "386");
  83	}
  84#endif
  85}
  86
  87static const struct cpu_dev default_cpu = {
  88	.c_init		= default_init,
  89	.c_vendor	= "Unknown",
  90	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
  91};
  92
  93static const struct cpu_dev *this_cpu = &default_cpu;
  94
  95DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
  96#ifdef CONFIG_X86_64
  97	/*
  98	 * We need valid kernel segments for data and code in long mode too
  99	 * IRET will check the segment types  kkeil 2000/10/28
 100	 * Also sysret mandates a special GDT layout
 101	 *
 102	 * TLS descriptors are currently at a different place compared to i386.
 103	 * Hopefully nobody expects them at a fixed place (Wine?)
 104	 */
 105	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
 106	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
 107	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
 108	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
 109	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
 110	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
 111#else
 112	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
 113	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 114	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
 115	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
 116	/*
 117	 * Segments used for calling PnP BIOS have byte granularity.
 118	 * The code segments and data segments have fixed 64k limits,
 119	 * the transfer segment sizes are set at run time.
 120	 */
 121	/* 32-bit code */
 122	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
 123	/* 16-bit code */
 124	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
 125	/* 16-bit data */
 126	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
 127	/* 16-bit data */
 128	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
 129	/* 16-bit data */
 130	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
 131	/*
 132	 * The APM segments have byte granularity and their bases
 133	 * are set at run time.  All have 64k limits.
 134	 */
 135	/* 32-bit code */
 136	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
 137	/* 16-bit code */
 138	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
 139	/* data */
 140	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),
 141
 142	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 143	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 144	GDT_STACK_CANARY_INIT
 145#endif
 146} };
 147EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 148
 149static int __init x86_mpx_setup(char *s)
 150{
 151	/* require an exact match without trailing characters */
 152	if (strlen(s))
 153		return 0;
 154
 155	/* do not emit a message if the feature is not present */
 156	if (!boot_cpu_has(X86_FEATURE_MPX))
 157		return 1;
 158
 159	setup_clear_cpu_cap(X86_FEATURE_MPX);
 160	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
 161	return 1;
 162}
 163__setup("nompx", x86_mpx_setup);
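/*
 * Editor's note, not part of the original file: __setup() registers a
 * handler that runs while the boot command line is parsed.  Booting with
 * exactly "nompx" invokes x86_mpx_setup("") -- strlen("") == 0, so the
 * MPX capability is force-cleared before alternatives are applied, and
 * the handler returns 1 ("option consumed").  A hypothetical variant
 * with trailing characters (e.g. "nompx=1") reaches the handler as a
 * non-empty string, fails the exact-match test and returns 0.
 */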
 164
 165static int __init x86_noinvpcid_setup(char *s)
 166{
 167	/* noinvpcid doesn't accept parameters */
 168	if (s)
 169		return -EINVAL;
 170
 171	/* do not emit a message if the feature is not present */
 172	if (!boot_cpu_has(X86_FEATURE_INVPCID))
 173		return 0;
 174
 175	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
 176	pr_info("noinvpcid: INVPCID feature disabled\n");
 177	return 0;
 178}
 179early_param("noinvpcid", x86_noinvpcid_setup);
 180
 181#ifdef CONFIG_X86_32
 182static int cachesize_override = -1;
 183static int disable_x86_serial_nr = 1;
 184
 185static int __init cachesize_setup(char *str)
 186{
 187	get_option(&str, &cachesize_override);
 188	return 1;
 189}
 190__setup("cachesize=", cachesize_setup);
 191
 192static int __init x86_sep_setup(char *s)
 193{
 194	setup_clear_cpu_cap(X86_FEATURE_SEP);
 195	return 1;
 196}
 197__setup("nosep", x86_sep_setup);
 198
 199/* Standard macro to see if a specific flag is changeable */
 200static inline int flag_is_changeable_p(u32 flag)
 201{
 202	u32 f1, f2;
 203
 204	/*
 205	 * Cyrix and IDT cpus allow disabling of CPUID
 206	 * so the code below may return different results
 207	 * when it is executed before and after enabling
 208	 * the CPUID. Add "volatile" to not allow gcc to
 209	 * optimize the subsequent calls to this function.
 210	 */
 211	asm volatile ("pushfl		\n\t"
 212		      "pushfl		\n\t"
 213		      "popl %0		\n\t"
 214		      "movl %0, %1	\n\t"
 215		      "xorl %2, %0	\n\t"
 216		      "pushl %0		\n\t"
 217		      "popfl		\n\t"
 218		      "pushfl		\n\t"
 219		      "popl %0		\n\t"
 220		      "popfl		\n\t"
 221
 222		      : "=&r" (f1), "=&r" (f2)
 223		      : "ir" (flag));
 224
 225	return ((f1^f2) & flag) != 0;
 226}
 227
 228/* Probe for the CPUID instruction */
 229int have_cpuid_p(void)
 230{
 231	return flag_is_changeable_p(X86_EFLAGS_ID);
 232}
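/*
 * Editor's note, not part of the original file: X86_EFLAGS_ID is bit 21
 * (0x200000).  The pushfl/popfl sequence above flips the requested bit,
 * writes EFLAGS back, and re-reads it; if the flip sticks, the bit is
 * software-controllable.  Pre-CPUID parts hardwire EFLAGS.ID, so
 * (f1 ^ f2) & flag comes back 0 and have_cpuid_p() reports false.
 */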
 233
 234static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 235{
 236	unsigned long lo, hi;
 237
 238	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
 239		return;
 240
 241	/* Disable processor serial number: */
 242
 243	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 244	lo |= 0x200000;
 245	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 246
 247	pr_notice("CPU serial number disabled.\n");
 248	clear_cpu_cap(c, X86_FEATURE_PN);
 249
 250	/* Disabling the serial number may affect the cpuid level */
 251	c->cpuid_level = cpuid_eax(0);
 252}
 253
 254static int __init x86_serial_nr_setup(char *s)
 255{
 256	disable_x86_serial_nr = 0;
 257	return 1;
 258}
 259__setup("serialnumber", x86_serial_nr_setup);
 260#else
 261static inline int flag_is_changeable_p(u32 flag)
 262{
 263	return 1;
 264}
 265static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 266{
 267}
 268#endif
 269
 270static __init int setup_disable_smep(char *arg)
 271{
 272	setup_clear_cpu_cap(X86_FEATURE_SMEP);
 273	return 1;
 274}
 275__setup("nosmep", setup_disable_smep);
 276
 277static __always_inline void setup_smep(struct cpuinfo_x86 *c)
 278{
 279	if (cpu_has(c, X86_FEATURE_SMEP))
 280		cr4_set_bits(X86_CR4_SMEP);
 281}
 282
 283static __init int setup_disable_smap(char *arg)
 284{
 285	setup_clear_cpu_cap(X86_FEATURE_SMAP);
 286	return 1;
 287}
 288__setup("nosmap", setup_disable_smap);
 289
 290static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 291{
 292	unsigned long eflags = native_save_fl();
 293
 294	/* This should have been cleared long ago */
 295	BUG_ON(eflags & X86_EFLAGS_AC);
 296
 297	if (cpu_has(c, X86_FEATURE_SMAP)) {
 298#ifdef CONFIG_X86_SMAP
 299		cr4_set_bits(X86_CR4_SMAP);
 300#else
 301		cr4_clear_bits(X86_CR4_SMAP);
 302#endif
 303	}
 304}
 305
 306/*
 307 * Protection Keys are not available in 32-bit mode.
 308 */
 309static bool pku_disabled;
 310
 311static __always_inline void setup_pku(struct cpuinfo_x86 *c)
 312{
 313	if (!cpu_has(c, X86_FEATURE_PKU))
 314		return;
 315	if (pku_disabled)
 316		return;
 317
 318	cr4_set_bits(X86_CR4_PKE);
 319	/*
 320	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
 321	 * cpuid bit to be set.  We need to ensure that we
 322	 * update that bit in this CPU's "cpu_info".
 323	 */
 324	get_cpu_cap(c);
 325}
 326
 327#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 328static __init int setup_disable_pku(char *arg)
 329{
 330	/*
 331	 * Do not clear the X86_FEATURE_PKU bit.  All of the
 332	 * runtime checks are against OSPKE so clearing the
 333	 * bit does nothing.
 334	 *
 335	 * This way, we will see "pku" in cpuinfo, but not
 336	 * "ospke", which is exactly what we want.  It shows
 337	 * that the CPU has PKU, but the OS has not enabled it.
 338	 * This happens to be exactly how a system would look
 339	 * if we disabled the config option.
 340	 */
 341	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
 342	pku_disabled = true;
 343	return 1;
 344}
 345__setup("nopku", setup_disable_pku);
 346#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 347
 348/*
 349 * Some CPU features depend on higher CPUID levels, which may not always
 350 * be available due to CPUID level capping or broken virtualization
 351 * software.  Add those features to this table to auto-disable them.
 352 */
 353struct cpuid_dependent_feature {
 354	u32 feature;
 355	u32 level;
 356};
 357
 358static const struct cpuid_dependent_feature
 359cpuid_dependent_features[] = {
 360	{ X86_FEATURE_MWAIT,		0x00000005 },
 361	{ X86_FEATURE_DCA,		0x00000009 },
 362	{ X86_FEATURE_XSAVE,		0x0000000d },
 363	{ 0, 0 }
 364};
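/*
 * Editor's note, not part of the original file: as an example,
 * X86_FEATURE_MWAIT is described by CPUID leaf 0x5.  If a hypervisor
 * caps the maximum basic leaf below that (say a cpuid_level of 4),
 * filter_cpuid_features() below auto-clears the MWAIT bit instead of
 * letting later code probe a leaf that does not exist.
 */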
 365
 366static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 367{
 368	const struct cpuid_dependent_feature *df;
 369
 370	for (df = cpuid_dependent_features; df->feature; df++) {
 371
 372		if (!cpu_has(c, df->feature))
 373			continue;
 374		/*
 375		 * Note: cpuid_level is set to -1 if unavailable, but
 376		 * extended_cpuid_level is set to 0 if unavailable
 377		 * and the legitimate extended levels are all negative
 378		 * when signed; hence the weird messing around with
 379		 * signs here...
 380		 */
 381		if (!((s32)df->level < 0 ?
 382		     (u32)df->level > (u32)c->extended_cpuid_level :
 383		     (s32)df->level > (s32)c->cpuid_level))
 384			continue;
 385
 386		clear_cpu_cap(c, df->feature);
 387		if (!warn)
 388			continue;
 389
 390		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
 391			x86_cap_flag(df->feature), df->level);
 392	}
 393}
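/*
 * Editor's note, not part of the original file: a worked instance of the
 * sign juggling above, assuming a hypothetical table entry with an
 * extended level.  0x80000008 is negative as an s32, so it is compared
 * unsigned against extended_cpuid_level (0 when absent): 0x80000008 > 0,
 * feature cleared.  A basic level such as 0x0000000d stays positive and
 * is compared signed against cpuid_level, so the "unavailable" sentinel
 * of -1 also fails the check and the feature is cleared.
 */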
 394
 395/*
 396 * Naming convention should be: <Name> [(<Codename>)]
 397 * This table is only used if init_<vendor>() below doesn't set it;
 398 * in particular, if CPUID levels 0x80000002..4 are supported, this
 399 * isn't used.
 400 */
 401
 402/* Look up CPU names by table lookup. */
 403static const char *table_lookup_model(struct cpuinfo_x86 *c)
 404{
 405#ifdef CONFIG_X86_32
 406	const struct legacy_cpu_model_info *info;
 407
 408	if (c->x86_model >= 16)
 409		return NULL;	/* Range check */
 410
 411	if (!this_cpu)
 412		return NULL;
 413
 414	info = this_cpu->legacy_models;
 415
 416	while (info->family) {
 417		if (info->family == c->x86)
 418			return info->model_names[c->x86_model];
 419		info++;
 420	}
 421#endif
 422	return NULL;		/* Not found */
 423}
 424
 425__u32 cpu_caps_cleared[NCAPINTS];
 426__u32 cpu_caps_set[NCAPINTS];
 427
 428void load_percpu_segment(int cpu)
 429{
 430#ifdef CONFIG_X86_32
 431	loadsegment(fs, __KERNEL_PERCPU);
 432#else
 433	loadsegment(gs, 0);
 434	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 435#endif
 436	load_stack_canary_segment();
 437}
 438
 439/*
 440 * Current gdt points %fs at the "master" per-cpu area: after this,
 441 * it's on the real one.
 442 */
 443void switch_to_new_gdt(int cpu)
 444{
 445	struct desc_ptr gdt_descr;
 446
 447	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 448	gdt_descr.size = GDT_SIZE - 1;
 449	load_gdt(&gdt_descr);
 450	/* Reload the per-cpu base */
 451
 452	load_percpu_segment(cpu);
 453}
 454
 455static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 456
 457static void get_model_name(struct cpuinfo_x86 *c)
 458{
 459	unsigned int *v;
 460	char *p, *q, *s;
 461
 462	if (c->extended_cpuid_level < 0x80000004)
 463		return;
 464
 465	v = (unsigned int *)c->x86_model_id;
 466	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 467	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 468	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 469	c->x86_model_id[48] = 0;
 470
 471	/* Trim whitespace */
 472	p = q = s = &c->x86_model_id[0];
 473
 474	while (*p == ' ')
 475		p++;
 476
 477	while (*p) {
 478		/* Note the last non-whitespace index */
 479		if (!isspace(*p))
 480			s = q;
 481
 482		*q++ = *p++;
 483	}
 484
 485	*(s + 1) = '\0';
 486}
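/*
 * Editor's note, not part of the original file: leaves 0x80000002..4
 * return the 48-byte brand string, which is frequently space-padded,
 * e.g. a hypothetical "      Intel(R) Xeon(R) CPU E5 @ 2.90GHz  ".  The
 * loop copies it while remembering the last non-space output position
 * in s, then terminates at s + 1, trimming leading and trailing blanks
 * in a single pass.
 */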
 487
 488void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 489{
 490	unsigned int n, dummy, ebx, ecx, edx, l2size;
 491
 492	n = c->extended_cpuid_level;
 493
 494	if (n >= 0x80000005) {
 495		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
 496		c->x86_cache_size = (ecx>>24) + (edx>>24);
 497#ifdef CONFIG_X86_64
 498		/* On K8 L1 TLB is inclusive, so don't count it */
 499		c->x86_tlbsize = 0;
 500#endif
 501	}
 502
 503	if (n < 0x80000006)	/* Some chips just have a large L1. */
 504		return;
 505
 506	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
 507	l2size = ecx >> 16;
 508
 509#ifdef CONFIG_X86_64
 510	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
 511#else
 512	/* do processor-specific cache resizing */
 513	if (this_cpu->legacy_cache_size)
 514		l2size = this_cpu->legacy_cache_size(c, l2size);
 515
 516	/* Allow user to override all this if necessary. */
 517	if (cachesize_override != -1)
 518		l2size = cachesize_override;
 519
 520	if (l2size == 0)
 521		return;		/* Again, no L2 cache is possible */
 522#endif
 523
 524	c->x86_cache_size = l2size;
 525}
 526
 527u16 __read_mostly tlb_lli_4k[NR_INFO];
 528u16 __read_mostly tlb_lli_2m[NR_INFO];
 529u16 __read_mostly tlb_lli_4m[NR_INFO];
 530u16 __read_mostly tlb_lld_4k[NR_INFO];
 531u16 __read_mostly tlb_lld_2m[NR_INFO];
 532u16 __read_mostly tlb_lld_4m[NR_INFO];
 533u16 __read_mostly tlb_lld_1g[NR_INFO];
 534
 535static void cpu_detect_tlb(struct cpuinfo_x86 *c)
 536{
 537	if (this_cpu->c_detect_tlb)
 538		this_cpu->c_detect_tlb(c);
 539
 540	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
 541		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
 542		tlb_lli_4m[ENTRIES]);
 543
 544	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
 545		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
 546		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 547}
 548
 549void detect_ht(struct cpuinfo_x86 *c)
 550{
 551#ifdef CONFIG_SMP
 552	u32 eax, ebx, ecx, edx;
 553	int index_msb, core_bits;
 554	static bool printed;
 555
 556	if (!cpu_has(c, X86_FEATURE_HT))
 557		return;
 558
 559	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 560		goto out;
 561
 562	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
 563		return;
 564
 565	cpuid(1, &eax, &ebx, &ecx, &edx);
 566
 567	smp_num_siblings = (ebx & 0xff0000) >> 16;
 568
 569	if (smp_num_siblings == 1) {
 570		pr_info_once("CPU0: Hyper-Threading is disabled\n");
 571		goto out;
 572	}
 573
 574	if (smp_num_siblings <= 1)
 575		goto out;
 576
 577	index_msb = get_count_order(smp_num_siblings);
 578	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 579
 580	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 581
 582	index_msb = get_count_order(smp_num_siblings);
 583
 584	core_bits = get_count_order(c->x86_max_cores);
 585
 586	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 587				       ((1 << core_bits) - 1);
 588
 589out:
 590	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
 591		pr_info("CPU: Physical Processor ID: %d\n",
 592			c->phys_proc_id);
 593		pr_info("CPU: Processor Core ID: %d\n",
 594			c->cpu_core_id);
 595		printed = 1;
 596	}
 597#endif
 598}
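/*
 * Editor's note, not part of the original file: a worked example with
 * assumed numbers.  On a hypothetical package with x86_max_cores = 4 and
 * two threads per core, CPUID leaf 1 EBX[23:16] gives smp_num_siblings
 * = 8, so index_msb = 3 strips the low three APIC-id bits to form
 * phys_proc_id.  Dividing by x86_max_cores leaves 2 threads per core,
 * so index_msb = 1 and core_bits = 2: cpu_core_id is the APIC id
 * shifted right by one and masked to two bits.
 */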
 599
 600static void get_cpu_vendor(struct cpuinfo_x86 *c)
 601{
 602	char *v = c->x86_vendor_id;
 603	int i;
 604
 605	for (i = 0; i < X86_VENDOR_NUM; i++) {
 606		if (!cpu_devs[i])
 607			break;
 608
 609		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 610		    (cpu_devs[i]->c_ident[1] &&
 611		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 612
 613			this_cpu = cpu_devs[i];
 614			c->x86_vendor = this_cpu->c_x86_vendor;
 615			return;
 616		}
 617	}
 618
 619	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
 620		    "CPU: Your system may be unstable.\n", v);
 621
 622	c->x86_vendor = X86_VENDOR_UNKNOWN;
 623	this_cpu = &default_cpu;
 624}
 625
 626void cpu_detect(struct cpuinfo_x86 *c)
 627{
 628	/* Get vendor name */
 629	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
 630	      (unsigned int *)&c->x86_vendor_id[0],
 631	      (unsigned int *)&c->x86_vendor_id[8],
 632	      (unsigned int *)&c->x86_vendor_id[4]);
 633
 634	c->x86 = 4;
 635	/* Intel-defined flags: level 0x00000001 */
 636	if (c->cpuid_level >= 0x00000001) {
 637		u32 junk, tfms, cap0, misc;
 638
 639		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 640		c->x86		= x86_family(tfms);
 641		c->x86_model	= x86_model(tfms);
 642		c->x86_mask	= x86_stepping(tfms);
 643
 644		if (cap0 & (1<<19)) {
 645			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 646			c->x86_cache_alignment = c->x86_clflush_size;
 647		}
 648	}
 649}
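/*
 * Editor's note, not part of the original file: a minimal user-space
 * sketch (assuming GCC's <cpuid.h>) of the same leaf-1 EAX decode that
 * x86_family()/x86_model()/x86_stepping() perform above -- the extended
 * family byte only counts when the base family is 0xf, and the extended
 * model nibble only from family 6 up:
 *
 *	#include <cpuid.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int eax, ebx, ecx, edx, fam, model;
 *
 *		if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
 *			return 1;
 *		fam = (eax >> 8) & 0xf;
 *		if (fam == 0xf)
 *			fam += (eax >> 20) & 0xff;
 *		model = (eax >> 4) & 0xf;
 *		if (fam >= 0x6)
 *			model += ((eax >> 16) & 0xf) << 4;
 *		printf("family 0x%x model 0x%x stepping 0x%x\n",
 *		       fam, model, eax & 0xf);
 *		return 0;
 *	}
 */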
 650
 651void get_cpu_cap(struct cpuinfo_x86 *c)
 652{
 653	u32 eax, ebx, ecx, edx;
 654
 655	/* Intel-defined flags: level 0x00000001 */
 656	if (c->cpuid_level >= 0x00000001) {
 657		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
 658
 659		c->x86_capability[CPUID_1_ECX] = ecx;
 660		c->x86_capability[CPUID_1_EDX] = edx;
 661	}
 662
 663	/* Additional Intel-defined flags: level 0x00000007 */
 664	if (c->cpuid_level >= 0x00000007) {
 665		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
 666
 667		c->x86_capability[CPUID_7_0_EBX] = ebx;
 668
 669		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
 670		c->x86_capability[CPUID_7_ECX] = ecx;
 671	}
 672
 673	/* Extended state features: level 0x0000000d */
 674	if (c->cpuid_level >= 0x0000000d) {
 675		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
 676
 677		c->x86_capability[CPUID_D_1_EAX] = eax;
 678	}
 679
 680	/* Additional Intel-defined flags: level 0x0000000F */
 681	if (c->cpuid_level >= 0x0000000F) {
 682
 683		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
 684		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
 685		c->x86_capability[CPUID_F_0_EDX] = edx;
 686
 687		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
 688			/* will be overridden if occupancy monitoring exists */
 689			c->x86_cache_max_rmid = ebx;
 690
 691			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
 692			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
 693			c->x86_capability[CPUID_F_1_EDX] = edx;
 694
 695			if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
 696			      ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
 697			       (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
 698				c->x86_cache_max_rmid = ecx;
 699				c->x86_cache_occ_scale = ebx;
 700			}
 701		} else {
 702			c->x86_cache_max_rmid = -1;
 703			c->x86_cache_occ_scale = -1;
 704		}
 705	}
 706
 707	/* AMD-defined flags: level 0x80000001 */
 708	eax = cpuid_eax(0x80000000);
 709	c->extended_cpuid_level = eax;
 710
 711	if ((eax & 0xffff0000) == 0x80000000) {
 712		if (eax >= 0x80000001) {
 713			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
 714
 715			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
 716			c->x86_capability[CPUID_8000_0001_EDX] = edx;
 717		}
 718	}
 719
 720	if (c->extended_cpuid_level >= 0x80000008) {
 721		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
 722
 723		c->x86_virt_bits = (eax >> 8) & 0xff;
 724		c->x86_phys_bits = eax & 0xff;
 725		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
 726	}
 727#ifdef CONFIG_X86_32
 728	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
 729		c->x86_phys_bits = 36;
 730#endif
 731
 732	if (c->extended_cpuid_level >= 0x80000007)
 733		c->x86_power = cpuid_edx(0x80000007);
 734
 735	if (c->extended_cpuid_level >= 0x8000000a)
 736		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 737
 738	init_scattered_cpuid_features(c);
 739}
 740
 741static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 742{
 743#ifdef CONFIG_X86_32
 744	int i;
 745
 746	/*
 747	 * First of all, decide if this is a 486 or higher
 748	 * It's a 486 if we can modify the AC flag
 749	 */
 750	if (flag_is_changeable_p(X86_EFLAGS_AC))
 751		c->x86 = 4;
 752	else
 753		c->x86 = 3;
 754
 755	for (i = 0; i < X86_VENDOR_NUM; i++)
 756		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
 757			c->x86_vendor_id[0] = 0;
 758			cpu_devs[i]->c_identify(c);
 759			if (c->x86_vendor_id[0]) {
 760				get_cpu_vendor(c);
 761				break;
 762			}
 763		}
 764#endif
 765}
 766
 767/*
 768 * Do minimum CPU detection early.
 769 * Fields really needed: vendor, cpuid_level, family, model, mask,
 770 * cache alignment.
 771 * The others are not touched to avoid unwanted side effects.
 772 *
 773 * WARNING: this function is only called on the BP.  Don't add code here
 774 * that is supposed to run on all CPUs.
 775 */
 776static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 777{
 778#ifdef CONFIG_X86_64
 779	c->x86_clflush_size = 64;
 780	c->x86_phys_bits = 36;
 781	c->x86_virt_bits = 48;
 782#else
 783	c->x86_clflush_size = 32;
 784	c->x86_phys_bits = 32;
 785	c->x86_virt_bits = 32;
 786#endif
 787	c->x86_cache_alignment = c->x86_clflush_size;
 788
 789	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 790	c->extended_cpuid_level = 0;
 791
 792	if (!have_cpuid_p())
 793		identify_cpu_without_cpuid(c);
 794
 795	/* cyrix could have cpuid enabled via c_identify()*/
 796	if (!have_cpuid_p())
 797		return;
 798
 799	cpu_detect(c);
 800	get_cpu_vendor(c);
 801	get_cpu_cap(c);
 802
 803	if (this_cpu->c_early_init)
 804		this_cpu->c_early_init(c);
 805
 806	c->cpu_index = 0;
 807	filter_cpuid_features(c, false);
 808
 809	if (this_cpu->c_bsp_init)
 810		this_cpu->c_bsp_init(c);
 811
 812	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 813	fpu__init_system(c);
 814}
 815
 816void __init early_cpu_init(void)
 817{
 818	const struct cpu_dev *const *cdev;
 819	int count = 0;
 820
 821#ifdef CONFIG_PROCESSOR_SELECT
 822	pr_info("KERNEL supported cpus:\n");
 823#endif
 824
 825	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
 826		const struct cpu_dev *cpudev = *cdev;
 827
 828		if (count >= X86_VENDOR_NUM)
 829			break;
 830		cpu_devs[count] = cpudev;
 831		count++;
 832
 833#ifdef CONFIG_PROCESSOR_SELECT
 834		{
 835			unsigned int j;
 836
 837			for (j = 0; j < 2; j++) {
 838				if (!cpudev->c_ident[j])
 839					continue;
 840				pr_info("  %s %s\n", cpudev->c_vendor,
 841					cpudev->c_ident[j]);
 842			}
 843		}
 844#endif
 845	}
 846	early_identify_cpu(&boot_cpu_data);
 847}
 848
 849/*
 850 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 851 * unfortunately, that's not true in practice because of early VIA
 852 * chips and (more importantly) broken virtualizers that are not easy
 853 * to detect. In the latter case it doesn't even *fail* reliably, so
 854 * probing for it doesn't even work. Disable it completely on 32-bit
 855 * unless we can find a reliable way to detect all the broken cases.
 856 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 857 */
 858static void detect_nopl(struct cpuinfo_x86 *c)
 859{
 860#ifdef CONFIG_X86_32
 861	clear_cpu_cap(c, X86_FEATURE_NOPL);
 862#else
 863	set_cpu_cap(c, X86_FEATURE_NOPL);
 864#endif
 865
 866	/*
 867	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
 868	 * systems that run Linux at CPL > 0 may or may not have the
 869	 * issue, but, even if they have the issue, there's absolutely
 870	 * nothing we can do about it because we can't use the real IRET
 871	 * instruction.
 872	 *
 873	 * NB: For the time being, only 32-bit kernels support
 874	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
 875	 * whether to apply espfix using paravirt hooks.  If any
 876	 * non-paravirt system ever shows up that does *not* have the
 877	 * ESPFIX issue, we can change this.
 878	 */
 879#ifdef CONFIG_X86_32
 880#ifdef CONFIG_PARAVIRT
 881	do {
 882		extern void native_iret(void);
 883		if (pv_cpu_ops.iret == native_iret)
 884			set_cpu_bug(c, X86_BUG_ESPFIX);
 885	} while (0);
 886#else
 887	set_cpu_bug(c, X86_BUG_ESPFIX);
 888#endif
 889#endif
 890}
 891
 892static void generic_identify(struct cpuinfo_x86 *c)
 893{
 894	c->extended_cpuid_level = 0;
 895
 896	if (!have_cpuid_p())
 897		identify_cpu_without_cpuid(c);
 898
 899	/* cyrix could have cpuid enabled via c_identify()*/
 900	if (!have_cpuid_p())
 901		return;
 902
 903	cpu_detect(c);
 904
 905	get_cpu_vendor(c);
 906
 907	get_cpu_cap(c);
 908
 909	if (c->cpuid_level >= 0x00000001) {
 910		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 911#ifdef CONFIG_X86_32
 912# ifdef CONFIG_SMP
 913		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 914# else
 915		c->apicid = c->initial_apicid;
 916# endif
 917#endif
 918		c->phys_proc_id = c->initial_apicid;
 919	}
 920
 921	get_model_name(c); /* Default name */
 922
 923	detect_nopl(c);
 924}
 925
 926static void x86_init_cache_qos(struct cpuinfo_x86 *c)
 927{
 928	/*
 929	 * The heavy lifting of max_rmid and cache_occ_scale are handled
 930	 * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
 931	 * in case CQM bits really aren't there in this CPU.
 932	 */
 933	if (c != &boot_cpu_data) {
 934		boot_cpu_data.x86_cache_max_rmid =
 935			min(boot_cpu_data.x86_cache_max_rmid,
 936			    c->x86_cache_max_rmid);
 937	}
 938}
 939
 940/*
 941 * This does the hard work of actually picking apart the CPU stuff...
 942 */
 943static void identify_cpu(struct cpuinfo_x86 *c)
 944{
 945	int i;
 946
 947	c->loops_per_jiffy = loops_per_jiffy;
 948	c->x86_cache_size = -1;
 949	c->x86_vendor = X86_VENDOR_UNKNOWN;
 950	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
 951	c->x86_vendor_id[0] = '\0'; /* Unset */
 952	c->x86_model_id[0] = '\0';  /* Unset */
 953	c->x86_max_cores = 1;
 954	c->x86_coreid_bits = 0;
 955#ifdef CONFIG_X86_64
 956	c->x86_clflush_size = 64;
 957	c->x86_phys_bits = 36;
 958	c->x86_virt_bits = 48;
 959#else
 960	c->cpuid_level = -1;	/* CPUID not detected */
 961	c->x86_clflush_size = 32;
 962	c->x86_phys_bits = 32;
 963	c->x86_virt_bits = 32;
 964#endif
 965	c->x86_cache_alignment = c->x86_clflush_size;
 966	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 967
 968	generic_identify(c);
 969
 970	if (this_cpu->c_identify)
 971		this_cpu->c_identify(c);
 972
 973	/* Clear/Set all flags overridden by options, after probe */
 974	for (i = 0; i < NCAPINTS; i++) {
 975		c->x86_capability[i] &= ~cpu_caps_cleared[i];
 976		c->x86_capability[i] |= cpu_caps_set[i];
 977	}
 978
 979#ifdef CONFIG_X86_64
 980	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 981#endif
 982
 983	/*
 984	 * Vendor-specific initialization.  In this section we
 985	 * canonicalize the feature flags, meaning if there are
 986	 * features a certain CPU supports which CPUID doesn't
 987	 * tell us, CPUID claiming incorrect flags, or other bugs,
 988	 * we handle them here.
 989	 *
 990	 * At the end of this section, c->x86_capability better
 991	 * indicate the features this CPU genuinely supports!
 992	 */
 993	if (this_cpu->c_init)
 994		this_cpu->c_init(c);
 995
 996	/* Disable the PN if appropriate */
 997	squash_the_stupid_serial_number(c);
 998
 999	/* Set up SMEP/SMAP */
1000	setup_smep(c);
1001	setup_smap(c);
1002
1003	/*
1004	 * The vendor-specific functions might have changed features.
1005	 * Now we do "generic changes."
1006	 */
1007
1008	/* Filter out anything that depends on CPUID levels we don't have */
1009	filter_cpuid_features(c, true);
1010
1011	/* If the model name is still unset, do table lookup. */
1012	if (!c->x86_model_id[0]) {
1013		const char *p;
1014		p = table_lookup_model(c);
1015		if (p)
1016			strcpy(c->x86_model_id, p);
1017		else
1018			/* Last resort... */
1019			sprintf(c->x86_model_id, "%02x/%02x",
1020				c->x86, c->x86_model);
1021	}
1022
1023#ifdef CONFIG_X86_64
1024	detect_ht(c);
1025#endif
1026
1027	init_hypervisor(c);
1028	x86_init_rdrand(c);
1029	x86_init_cache_qos(c);
1030	setup_pku(c);
1031
1032	/*
1033	 * Clear/Set all flags overridden by options; this needs to happen
1034	 * before the SMP capability AND across all CPUs below.
1035	 */
1036	for (i = 0; i < NCAPINTS; i++) {
1037		c->x86_capability[i] &= ~cpu_caps_cleared[i];
1038		c->x86_capability[i] |= cpu_caps_set[i];
1039	}
1040
1041	/*
1042	 * On SMP, boot_cpu_data holds the common feature set between
1043	 * all CPUs; so make sure that we indicate which features are
1044	 * common between the CPUs.  The first time this routine gets
1045	 * executed, c == &boot_cpu_data.
1046	 */
1047	if (c != &boot_cpu_data) {
1048		/* AND the already accumulated flags with these */
1049		for (i = 0; i < NCAPINTS; i++)
1050			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1051
1052		/* OR, i.e. replicate the bug flags */
1053		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
1054			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1055	}
1056
1057	/* Init Machine Check Exception if available. */
1058	mcheck_cpu_init(c);
1059
1060	select_idle_routine(c);
1061
1062#ifdef CONFIG_NUMA
1063	numa_add_cpu(smp_processor_id());
1064#endif
1065	/* The boot/hotplug time assignment got cleared, restore it */
1066	c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
1067}
1068
1069/*
1070 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
1071 * on 32-bit kernels:
1072 */
1073#ifdef CONFIG_X86_32
1074void enable_sep_cpu(void)
1075{
1076	struct tss_struct *tss;
1077	int cpu;
1078
1079	cpu = get_cpu();
1080	tss = &per_cpu(cpu_tss, cpu);
1081
1082	if (!boot_cpu_has(X86_FEATURE_SEP))
1083		goto out;
1084
1085	/*
1086	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1087	 * see the big comment in struct x86_hw_tss's definition.
1088	 */
1089
1090	tss->x86_tss.ss1 = __KERNEL_CS;
1091	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
1092
1093	wrmsr(MSR_IA32_SYSENTER_ESP,
1094	      (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
1095	      0);
1096
1097	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
1098
1099out:
1100	put_cpu();
1101}
1102#endif
1103
1104void __init identify_boot_cpu(void)
1105{
1106	identify_cpu(&boot_cpu_data);
1107	init_amd_e400_c1e_mask();
1108#ifdef CONFIG_X86_32
1109	sysenter_setup();
1110	enable_sep_cpu();
1111#endif
1112	cpu_detect_tlb(&boot_cpu_data);
1113}
1114
1115void identify_secondary_cpu(struct cpuinfo_x86 *c)
1116{
1117	BUG_ON(c == &boot_cpu_data);
1118	identify_cpu(c);
1119#ifdef CONFIG_X86_32
1120	enable_sep_cpu();
1121#endif
1122	mtrr_ap_init();
1123}
1124
1125struct msr_range {
1126	unsigned	min;
1127	unsigned	max;
1128};
1129
1130static const struct msr_range msr_range_array[] = {
1131	{ 0x00000000, 0x00000418},
1132	{ 0xc0000000, 0xc000040b},
1133	{ 0xc0010000, 0xc0010142},
1134	{ 0xc0011000, 0xc001103b},
1135};
1136
1137static void __print_cpu_msr(void)
1138{
1139	unsigned index_min, index_max;
1140	unsigned index;
1141	u64 val;
1142	int i;
1143
1144	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
1145		index_min = msr_range_array[i].min;
1146		index_max = msr_range_array[i].max;
1147
1148		for (index = index_min; index < index_max; index++) {
1149			if (rdmsrl_safe(index, &val))
1150				continue;
1151			pr_info(" MSR%08x: %016llx\n", index, val);
1152		}
1153	}
1154}
1155
1156static int show_msr;
1157
1158static __init int setup_show_msr(char *arg)
1159{
1160	int num;
1161
1162	get_option(&arg, &num);
1163
1164	if (num > 0)
1165		show_msr = num;
1166	return 1;
1167}
1168__setup("show_msr=", setup_show_msr);
1169
1170static __init int setup_noclflush(char *arg)
1171{
1172	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
1173	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
1174	return 1;
1175}
1176__setup("noclflush", setup_noclflush);
1177
1178void print_cpu_info(struct cpuinfo_x86 *c)
1179{
1180	const char *vendor = NULL;
1181
1182	if (c->x86_vendor < X86_VENDOR_NUM) {
1183		vendor = this_cpu->c_vendor;
1184	} else {
1185		if (c->cpuid_level >= 0)
1186			vendor = c->x86_vendor_id;
1187	}
1188
1189	if (vendor && !strstr(c->x86_model_id, vendor))
1190		pr_cont("%s ", vendor);
1191
1192	if (c->x86_model_id[0])
1193		pr_cont("%s", c->x86_model_id);
1194	else
1195		pr_cont("%d86", c->x86);
1196
1197	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
1198
1199	if (c->x86_mask || c->cpuid_level >= 0)
1200		pr_cont(", stepping: 0x%x)\n", c->x86_mask);
1201	else
1202		pr_cont(")\n");
1203
1204	print_cpu_msr(c);
1205}
1206
1207void print_cpu_msr(struct cpuinfo_x86 *c)
1208{
1209	if (c->cpu_index < show_msr)
1210		__print_cpu_msr();
1211}
1212
1213static __init int setup_disablecpuid(char *arg)
1214{
1215	int bit;
1216
1217	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
1218		setup_clear_cpu_cap(bit);
1219	else
1220		return 0;
1221
1222	return 1;
1223}
1224__setup("clearcpuid=", setup_disablecpuid);
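/*
 * Editor's note, not part of the original file: the number indexes the
 * flattened x86_capability[] bitmap, i.e. bit = 32 * word + bit_in_word.
 * As a hypothetical example, X86_FEATURE_MWAIT lives in word 4, bit 3,
 * so "clearcpuid=131" (4 * 32 + 3) would clear it at boot.
 */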
1225
1226#ifdef CONFIG_X86_64
1227struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
1228struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
1229				    (unsigned long) debug_idt_table };
1230
1231DEFINE_PER_CPU_FIRST(union irq_stack_union,
1232		     irq_stack_union) __aligned(PAGE_SIZE) __visible;
1233
1234/*
1235 * The following percpu variables are hot.  Align current_task to
1236 * cacheline size such that they fall in the same cacheline.
1237 */
1238DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
1239	&init_task;
1240EXPORT_PER_CPU_SYMBOL(current_task);
1241
1242DEFINE_PER_CPU(char *, irq_stack_ptr) =
1243	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
1244
1245DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
1246
1247DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1248EXPORT_PER_CPU_SYMBOL(__preempt_count);
1249
1250/*
1251 * Special IST stacks which the CPU switches to when it calls
1252 * an IST-marked descriptor entry. Up to 7 stacks (hardware
1253 * limit), all of them are 4K, except the debug stack which
1254 * is 8K.
1255 */
1256static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
1257	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
1258	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
1259};
1260
1261static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
1262	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
1263
1264/* May not be marked __init: used by software suspend */
1265void syscall_init(void)
1266{
1267	/*
1268	 * LSTAR and STAR live in a bit strange symbiosis.
1269	 * They both write to the same internal register. STAR allows setting
1270	 * CS/DS, but only a 32-bit target; LSTAR sets the 64-bit RIP.
1271	 */
1272	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
1273	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
1274
1275#ifdef CONFIG_IA32_EMULATION
1276	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
1277	/*
1278	 * This only works on Intel CPUs.
1279	 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
1280	 * This does not cause SYSENTER to jump to the wrong location, because
1281	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
1282	 */
1283	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
1284	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
1285	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
1286#else
1287	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
1288	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
1289	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
1290	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
1291#endif
1292
1293	/* Flags to clear on syscall */
1294	wrmsrl(MSR_SYSCALL_MASK,
1295	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
1296	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
1297}
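/*
 * Editor's note, not part of the original file: MSR_STAR packs two
 * selector bases.  The wrmsr(msr, lo, hi) above writes lo = 0 and
 * hi = (__USER32_CS << 16) | __KERNEL_CS, i.e. STAR[47:32] = __KERNEL_CS
 * (the CS/SS base loaded on SYSCALL entry) and STAR[63:48] = __USER32_CS
 * (the base from which SYSRET derives the user CS and SS).
 */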
1298
1299/*
1300 * Copies of the original ist values from the tss are only accessed during
1301 * debugging, no special alignment required.
1302 */
1303DEFINE_PER_CPU(struct orig_ist, orig_ist);
1304
1305static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
1306DEFINE_PER_CPU(int, debug_stack_usage);
1307
1308int is_debug_stack(unsigned long addr)
1309{
1310	return __this_cpu_read(debug_stack_usage) ||
1311		(addr <= __this_cpu_read(debug_stack_addr) &&
1312		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
1313}
1314NOKPROBE_SYMBOL(is_debug_stack);
1315
1316DEFINE_PER_CPU(u32, debug_idt_ctr);
1317
1318void debug_stack_set_zero(void)
1319{
1320	this_cpu_inc(debug_idt_ctr);
1321	load_current_idt();
1322}
1323NOKPROBE_SYMBOL(debug_stack_set_zero);
1324
1325void debug_stack_reset(void)
1326{
1327	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
1328		return;
1329	if (this_cpu_dec_return(debug_idt_ctr) == 0)
1330		load_current_idt();
1331}
1332NOKPROBE_SYMBOL(debug_stack_reset);
1333
1334#else	/* CONFIG_X86_64 */
1335
1336DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
1337EXPORT_PER_CPU_SYMBOL(current_task);
1338DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1339EXPORT_PER_CPU_SYMBOL(__preempt_count);
1340
1341/*
1342 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
1343 * the top of the kernel stack.  Use an extra percpu variable to track the
1344 * top of the kernel stack directly.
1345 */
1346DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
1347	(unsigned long)&init_thread_union + THREAD_SIZE;
1348EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
1349
1350#ifdef CONFIG_CC_STACKPROTECTOR
1351DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
1352#endif
1353
1354#endif	/* CONFIG_X86_64 */
1355
1356/*
1357 * Clear all 6 debug registers:
1358 */
1359static void clear_all_debug_regs(void)
1360{
1361	int i;
1362
1363	for (i = 0; i < 8; i++) {
1364		/* Ignore db4, db5 */
1365		if ((i == 4) || (i == 5))
1366			continue;
1367
1368		set_debugreg(0, i);
1369	}
1370}
1371
1372#ifdef CONFIG_KGDB
1373/*
1374 * Restore debug regs if using kgdbwait and you have a kernel debugger
1375 * connection established.
1376 */
1377static void dbg_restore_debug_regs(void)
1378{
1379	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
1380		arch_kgdb_ops.correct_hw_break();
1381}
1382#else /* ! CONFIG_KGDB */
1383#define dbg_restore_debug_regs()
1384#endif /* ! CONFIG_KGDB */
1385
1386static void wait_for_master_cpu(int cpu)
1387{
1388#ifdef CONFIG_SMP
1389	/*
1390	 * wait for ACK from master CPU before continuing
1391	 * with AP initialization
1392	 */
1393	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
1394	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
1395		cpu_relax();
1396#endif
1397}
1398
1399/*
1400 * cpu_init() initializes state that is per-CPU. Some data is already
1401 * initialized (naturally) in the bootstrap process, such as the GDT
1402 * and IDT. We reload them nevertheless, this function acts as a
1403 * 'CPU state barrier', nothing should get across.
1404 * A lot of state is already set up in PDA init for 64 bit
1405 */
1406#ifdef CONFIG_X86_64
1407
1408void cpu_init(void)
1409{
1410	struct orig_ist *oist;
1411	struct task_struct *me;
1412	struct tss_struct *t;
1413	unsigned long v;
1414	int cpu = stack_smp_processor_id();
1415	int i;
1416
1417	wait_for_master_cpu(cpu);
1418
1419	/*
1420	 * Initialize the CR4 shadow before doing anything that could
1421	 * try to read it.
1422	 */
1423	cr4_init_shadow();
1424
1425	/*
1426	 * Load microcode on this cpu if a valid microcode is available.
1427	 * This is early microcode loading procedure.
1428	 */
1429	load_ucode_ap();
1430
1431	t = &per_cpu(cpu_tss, cpu);
1432	oist = &per_cpu(orig_ist, cpu);
1433
1434#ifdef CONFIG_NUMA
1435	if (this_cpu_read(numa_node) == 0 &&
1436	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
1437		set_numa_node(early_cpu_to_node(cpu));
1438#endif
1439
1440	me = current;
1441
1442	pr_debug("Initializing CPU#%d\n", cpu);
1443
1444	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1445
1446	/*
1447	 * Initialize the per-CPU GDT with the boot GDT,
1448	 * and set up the GDT descriptor:
1449	 */
1450
1451	switch_to_new_gdt(cpu);
1452	loadsegment(fs, 0);
1453
1454	load_current_idt();
1455
1456	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
1457	syscall_init();
1458
1459	wrmsrl(MSR_FS_BASE, 0);
1460	wrmsrl(MSR_KERNEL_GS_BASE, 0);
1461	barrier();
1462
1463	x86_configure_nx();
1464	x2apic_setup();
1465
1466	/*
1467	 * set up and load the per-CPU TSS
1468	 */
1469	if (!oist->ist[0]) {
1470		char *estacks = per_cpu(exception_stacks, cpu);
1471
1472		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
1473			estacks += exception_stack_sizes[v];
1474			oist->ist[v] = t->x86_tss.ist[v] =
1475					(unsigned long)estacks;
1476			if (v == DEBUG_STACK-1)
1477				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
1478		}
1479	}
1480
1481	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1482
1483	/*
1484	 * <= is required because the CPU will access up to
1485	 * 8 bits beyond the end of the IO permission bitmap.
1486	 */
1487	for (i = 0; i <= IO_BITMAP_LONGS; i++)
1488		t->io_bitmap[i] = ~0UL;
1489
1490	atomic_inc(&init_mm.mm_count);
1491	me->active_mm = &init_mm;
1492	BUG_ON(me->mm);
1493	enter_lazy_tlb(&init_mm, me);
1494
1495	load_sp0(t, &current->thread);
1496	set_tss_desc(cpu, t);
1497	load_TR_desc();
1498	load_mm_ldt(&init_mm);
1499
1500	clear_all_debug_regs();
1501	dbg_restore_debug_regs();
1502
1503	fpu__init_cpu();
1504
1505	if (is_uv_system())
1506		uv_cpu_init();
1507}
1508
1509#else
1510
1511void cpu_init(void)
1512{
1513	int cpu = smp_processor_id();
1514	struct task_struct *curr = current;
1515	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
1516	struct thread_struct *thread = &curr->thread;
1517
1518	wait_for_master_cpu(cpu);
1519
1520	/*
1521	 * Initialize the CR4 shadow before doing anything that could
1522	 * try to read it.
1523	 */
1524	cr4_init_shadow();
1525
1526	show_ucode_info_early();
1527
1528	pr_info("Initializing CPU#%d\n", cpu);
1529
1530	if (cpu_feature_enabled(X86_FEATURE_VME) ||
1531	    cpu_has_tsc ||
1532	    boot_cpu_has(X86_FEATURE_DE))
1533		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1534
1535	load_current_idt();
1536	switch_to_new_gdt(cpu);
1537
1538	/*
1539	 * Set up and load the per-CPU TSS and LDT
1540	 */
1541	atomic_inc(&init_mm.mm_count);
1542	curr->active_mm = &init_mm;
1543	BUG_ON(curr->mm);
1544	enter_lazy_tlb(&init_mm, curr);
1545
1546	load_sp0(t, thread);
1547	set_tss_desc(cpu, t);
1548	load_TR_desc();
1549	load_mm_ldt(&init_mm);
1550
1551	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1552
1553#ifdef CONFIG_DOUBLEFAULT
1554	/* Set up doublefault TSS pointer in the GDT */
1555	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
1556#endif
1557
1558	clear_all_debug_regs();
1559	dbg_restore_debug_regs();
1560
1561	fpu__init_cpu();
1562}
1563#endif
1564
1565static void bsp_resume(void)
1566{
1567	if (this_cpu->c_bsp_resume)
1568		this_cpu->c_bsp_resume(&boot_cpu_data);
1569}
1570
1571static struct syscore_ops cpu_syscore_ops = {
1572	.resume		= bsp_resume,
1573};
1574
1575static int __init init_cpu_syscore(void)
1576{
1577	register_syscore_ops(&cpu_syscore_ops);
1578	return 0;
1579}
1580core_initcall(init_cpu_syscore);
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* cpu_feature_enabled() cannot be used this early */
   3#define USE_EARLY_PGTABLE_L5
   4
   5#include <linux/memblock.h>
   6#include <linux/linkage.h>
   7#include <linux/bitops.h>
   8#include <linux/kernel.h>
   9#include <linux/export.h>
  10#include <linux/percpu.h>
  11#include <linux/string.h>
  12#include <linux/ctype.h>
  13#include <linux/delay.h>
  14#include <linux/sched/mm.h>
  15#include <linux/sched/clock.h>
  16#include <linux/sched/task.h>
  17#include <linux/sched/smt.h>
  18#include <linux/init.h>
  19#include <linux/kprobes.h>
  20#include <linux/kgdb.h>
  21#include <linux/smp.h>
  22#include <linux/io.h>
  23#include <linux/syscore_ops.h>
  24#include <linux/pgtable.h>
  25
  26#include <asm/stackprotector.h>
  27#include <asm/perf_event.h>
  28#include <asm/mmu_context.h>
  29#include <asm/doublefault.h>
  30#include <asm/archrandom.h>
  31#include <asm/hypervisor.h>
  32#include <asm/processor.h>
  33#include <asm/tlbflush.h>
  34#include <asm/debugreg.h>
  35#include <asm/sections.h>
  36#include <asm/vsyscall.h>
  37#include <linux/topology.h>
  38#include <linux/cpumask.h>
  39#include <linux/atomic.h>
  40#include <asm/proto.h>
  41#include <asm/setup.h>
  42#include <asm/apic.h>
  43#include <asm/desc.h>
  44#include <asm/fpu/internal.h>
  45#include <asm/mtrr.h>
  46#include <asm/hwcap2.h>
  47#include <linux/numa.h>
  48#include <asm/numa.h>
  49#include <asm/asm.h>
  50#include <asm/bugs.h>
  51#include <asm/cpu.h>
  52#include <asm/mce.h>
  53#include <asm/msr.h>
  54#include <asm/memtype.h>
  55#include <asm/microcode.h>
  56#include <asm/microcode_intel.h>
  57#include <asm/intel-family.h>
  58#include <asm/cpu_device_id.h>
  59#include <asm/uv/uv.h>
  60
  61#include "cpu.h"
  62
  63u32 elf_hwcap2 __read_mostly;
  64
  65/* all of these masks are initialized in setup_cpu_local_masks() */
  66cpumask_var_t cpu_initialized_mask;
  67cpumask_var_t cpu_callout_mask;
  68cpumask_var_t cpu_callin_mask;
  69
  70/* representing cpus for which sibling maps can be computed */
  71cpumask_var_t cpu_sibling_setup_mask;
  72
  73/* Number of siblings per CPU package */
  74int smp_num_siblings = 1;
  75EXPORT_SYMBOL(smp_num_siblings);
  76
  77/* Last level cache ID of each logical CPU */
  78DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
  79
  80/* correctly size the local cpu masks */
  81void __init setup_cpu_local_masks(void)
  82{
  83	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
  84	alloc_bootmem_cpumask_var(&cpu_callin_mask);
  85	alloc_bootmem_cpumask_var(&cpu_callout_mask);
  86	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
  87}
  88
  89static void default_init(struct cpuinfo_x86 *c)
  90{
  91#ifdef CONFIG_X86_64
  92	cpu_detect_cache_sizes(c);
  93#else
  94	/* Not much we can do here... */
  95	/* Check if it at least has cpuid */
  96	if (c->cpuid_level == -1) {
  97		/* No cpuid. It must be an ancient CPU */
  98		if (c->x86 == 4)
  99			strcpy(c->x86_model_id, "486");
 100		else if (c->x86 == 3)
 101			strcpy(c->x86_model_id, "386");
 102	}
 103#endif
 104}
 105
 106static const struct cpu_dev default_cpu = {
 107	.c_init		= default_init,
 108	.c_vendor	= "Unknown",
 109	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
 110};
 111
 112static const struct cpu_dev *this_cpu = &default_cpu;
 113
 114DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 115#ifdef CONFIG_X86_64
 116	/*
 117	 * We need valid kernel segments for data and code in long mode too
 118	 * IRET will check the segment types  kkeil 2000/10/28
 119	 * Also sysret mandates a special GDT layout
 120	 *
 121	 * TLS descriptors are currently at a different place compared to i386.
 122	 * Hopefully nobody expects them at a fixed place (Wine?)
 123	 */
 124	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
 125	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
 126	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
 127	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
 128	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
 129	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
 130#else
 131	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
 132	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 133	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
 134	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
 135	/*
 136	 * Segments used for calling PnP BIOS have byte granularity.
 137	 * The code segments and data segments have fixed 64k limits,
 138	 * the transfer segment sizes are set at run time.
 139	 */
 140	/* 32-bit code */
 141	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
 142	/* 16-bit code */
 143	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
 144	/* 16-bit data */
 145	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
 146	/* 16-bit data */
 147	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
 148	/* 16-bit data */
 149	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
 150	/*
 151	 * The APM segments have byte granularity and their bases
 152	 * are set at run time.  All have 64k limits.
 153	 */
 154	/* 32-bit code */
 155	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
 156	/* 16-bit code */
 157	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
 158	/* data */
 159	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),
 160
 161	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 162	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 163	GDT_STACK_CANARY_INIT
 164#endif
 165} };
 166EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 167
 168#ifdef CONFIG_X86_64
 169static int __init x86_nopcid_setup(char *s)
 170{
 171	/* nopcid doesn't accept parameters */
 172	if (s)
 173		return -EINVAL;
 174
 175	/* do not emit a message if the feature is not present */
 176	if (!boot_cpu_has(X86_FEATURE_PCID))
 177		return 0;
 178
 179	setup_clear_cpu_cap(X86_FEATURE_PCID);
 180	pr_info("nopcid: PCID feature disabled\n");
 181	return 0;
 182}
 183early_param("nopcid", x86_nopcid_setup);
 184#endif
 185
 186static int __init x86_noinvpcid_setup(char *s)
 187{
 188	/* noinvpcid doesn't accept parameters */
 189	if (s)
 190		return -EINVAL;
 191
 192	/* do not emit a message if the feature is not present */
 193	if (!boot_cpu_has(X86_FEATURE_INVPCID))
 194		return 0;
 195
 196	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
 197	pr_info("noinvpcid: INVPCID feature disabled\n");
 198	return 0;
 199}
 200early_param("noinvpcid", x86_noinvpcid_setup);
 201
 202#ifdef CONFIG_X86_32
 203static int cachesize_override = -1;
 204static int disable_x86_serial_nr = 1;
 205
 206static int __init cachesize_setup(char *str)
 207{
 208	get_option(&str, &cachesize_override);
 209	return 1;
 210}
 211__setup("cachesize=", cachesize_setup);
 212
 213static int __init x86_sep_setup(char *s)
 214{
 215	setup_clear_cpu_cap(X86_FEATURE_SEP);
 216	return 1;
 217}
 218__setup("nosep", x86_sep_setup);
 219
 220/* Standard macro to see if a specific flag is changeable */
 221static inline int flag_is_changeable_p(u32 flag)
 222{
 223	u32 f1, f2;
 224
 225	/*
 226	 * Cyrix and IDT cpus allow disabling of CPUID
 227	 * so the code below may return different results
 228	 * when it is executed before and after enabling
 229	 * the CPUID. Add "volatile" to not allow gcc to
 230	 * optimize the subsequent calls to this function.
 231	 */
 232	asm volatile ("pushfl		\n\t"
 233		      "pushfl		\n\t"
 234		      "popl %0		\n\t"
 235		      "movl %0, %1	\n\t"
 236		      "xorl %2, %0	\n\t"
 237		      "pushl %0		\n\t"
 238		      "popfl		\n\t"
 239		      "pushfl		\n\t"
 240		      "popl %0		\n\t"
 241		      "popfl		\n\t"
 242
 243		      : "=&r" (f1), "=&r" (f2)
 244		      : "ir" (flag));
 245
 246	return ((f1^f2) & flag) != 0;
 247}
 248
 249/* Probe for the CPUID instruction */
 250int have_cpuid_p(void)
 251{
 252	return flag_is_changeable_p(X86_EFLAGS_ID);
 253}
 254
 255static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 256{
 257	unsigned long lo, hi;
 258
 259	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
 260		return;
 261
 262	/* Disable processor serial number: */
 263
 264	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 265	lo |= 0x200000;
 266	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 267
 268	pr_notice("CPU serial number disabled.\n");
 269	clear_cpu_cap(c, X86_FEATURE_PN);
 270
 271	/* Disabling the serial number may affect the cpuid level */
 272	c->cpuid_level = cpuid_eax(0);
 273}
 274
 275static int __init x86_serial_nr_setup(char *s)
 276{
 277	disable_x86_serial_nr = 0;
 278	return 1;
 279}
 280__setup("serialnumber", x86_serial_nr_setup);
 281#else
 282static inline int flag_is_changeable_p(u32 flag)
 283{
 284	return 1;
 285}
 286static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 287{
 288}
 289#endif
 290
 291static __init int setup_disable_smep(char *arg)
 292{
 293	setup_clear_cpu_cap(X86_FEATURE_SMEP);
 294	return 1;
 295}
 296__setup("nosmep", setup_disable_smep);
 297
 298static __always_inline void setup_smep(struct cpuinfo_x86 *c)
 299{
 300	if (cpu_has(c, X86_FEATURE_SMEP))
 301		cr4_set_bits(X86_CR4_SMEP);
 302}
 303
 304static __init int setup_disable_smap(char *arg)
 305{
 306	setup_clear_cpu_cap(X86_FEATURE_SMAP);
 307	return 1;
 308}
 309__setup("nosmap", setup_disable_smap);
 310
 311static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 312{
 313	unsigned long eflags = native_save_fl();
 314
 315	/* This should have been cleared long ago */
 316	BUG_ON(eflags & X86_EFLAGS_AC);
 317
 318	if (cpu_has(c, X86_FEATURE_SMAP)) {
 319#ifdef CONFIG_X86_SMAP
 320		cr4_set_bits(X86_CR4_SMAP);
 321#else
 322		cr4_clear_bits(X86_CR4_SMAP);
 323#endif
 324	}
 325}
 326
 327static __always_inline void setup_umip(struct cpuinfo_x86 *c)
 328{
 329	/* Check the boot processor, plus build option for UMIP. */
 330	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
 331		goto out;
 332
 333	/* Check the current processor's cpuid bits. */
 334	if (!cpu_has(c, X86_FEATURE_UMIP))
 335		goto out;
 336
 337	cr4_set_bits(X86_CR4_UMIP);
 338
 339	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");
 340
 341	return;
 342
 343out:
 344	/*
 345	 * Make sure UMIP is disabled in case it was enabled in a
 346	 * previous boot (e.g., via kexec).
 347	 */
 348	cr4_clear_bits(X86_CR4_UMIP);
 349}
 350
 351/* These bits should not change their value after CPU init is finished. */
 352static const unsigned long cr4_pinned_mask =
 353	X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
 354static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
 355static unsigned long cr4_pinned_bits __ro_after_init;
 356
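/*
 * CR0 pinning: once cr_pinning is enabled, any attempt to write CR0
 * with the WP bit cleared is undone by re-writing the register with
 * WP forced back on, followed by a one-time warning.
 */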
 357void native_write_cr0(unsigned long val)
 358{
 359	unsigned long bits_missing = 0;
 360
 361set_register:
 362	asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));
 363
 364	if (static_branch_likely(&cr_pinning)) {
 365		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
 366			bits_missing = X86_CR0_WP;
 367			val |= bits_missing;
 368			goto set_register;
 369		}
 370		/* Warn after we've set the missing bits. */
 371		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
 372	}
 373}
 374EXPORT_SYMBOL(native_write_cr0);
 375
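/*
 * CR4 pinning: as with CR0.WP above, any write that flips one of the
 * bits in cr4_pinned_mask away from its recorded boot-time value is
 * corrected before the warning is emitted.
 */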
 376void native_write_cr4(unsigned long val)
 377{
 378	unsigned long bits_changed = 0;
 379
 380set_register:
 381	asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
 382
 383	if (static_branch_likely(&cr_pinning)) {
 384		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
 385			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
 386			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
 387			goto set_register;
 388		}
 389		/* Warn after we've corrected the changed bits. */
 390		WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
 391			  bits_changed);
 392	}
 393}
 394#if IS_MODULE(CONFIG_LKDTM)
 395EXPORT_SYMBOL_GPL(native_write_cr4);
 396#endif
 397
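/*
 * Read-modify-write CR4 via the per-CPU shadow in cpu_tlbstate; the
 * expensive hardware write is skipped when no bits actually change.
 * Callers must have interrupts disabled so the shadow and the
 * register stay coherent.
 */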
 398void cr4_update_irqsoff(unsigned long set, unsigned long clear)
 399{
 400	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
 401
 402	lockdep_assert_irqs_disabled();
 403
 404	newval = (cr4 & ~clear) | set;
 405	if (newval != cr4) {
 406		this_cpu_write(cpu_tlbstate.cr4, newval);
 407		__write_cr4(newval);
 408	}
 409}
 410EXPORT_SYMBOL(cr4_update_irqsoff);
 411
 412/* Read the CR4 shadow. */
 413unsigned long cr4_read_shadow(void)
 414{
 415	return this_cpu_read(cpu_tlbstate.cr4);
 416}
 417EXPORT_SYMBOL_GPL(cr4_read_shadow);
 418
 419void cr4_init(void)
 420{
 421	unsigned long cr4 = __read_cr4();
 422
 423	if (boot_cpu_has(X86_FEATURE_PCID))
 424		cr4 |= X86_CR4_PCIDE;
 425	if (static_branch_likely(&cr_pinning))
 426		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
 427
 428	__write_cr4(cr4);
 429
 430	/* Initialize cr4 shadow for this CPU. */
 431	this_cpu_write(cpu_tlbstate.cr4, cr4);
 432}
 433
 434/*
 435 * Once CPU feature detection is finished (and boot params have been
 436 * parsed), record any of the sensitive CR bits that are set, and
 437 * enable CR pinning.
 438 */
 439static void __init setup_cr_pinning(void)
 440{
 441	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
 442	static_key_enable(&cr_pinning.key);
 443}
 444
 445static __init int x86_nofsgsbase_setup(char *arg)
 446{
 447	/* Require an exact match without trailing characters. */
 448	if (strlen(arg))
 449		return 0;
 450
 451	/* Do not emit a message if the feature is not present. */
 452	if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
 453		return 1;
 454
 455	setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
 456	pr_info("FSGSBASE disabled via kernel command line\n");
 457	return 1;
 458}
 459__setup("nofsgsbase", x86_nofsgsbase_setup);
 460
 461/*
 462 * Protection Keys are not available in 32-bit mode.
 463 */
 464static bool pku_disabled;
 465
 466static __always_inline void setup_pku(struct cpuinfo_x86 *c)
 467{
 468	struct pkru_state *pk;
 469
 470	/* check the boot processor, plus compile options for PKU: */
 471	if (!cpu_feature_enabled(X86_FEATURE_PKU))
 472		return;
 473	/* checks the actual processor's cpuid bits: */
 474	if (!cpu_has(c, X86_FEATURE_PKU))
 475		return;
 476	if (pku_disabled)
 477		return;
 478
 479	cr4_set_bits(X86_CR4_PKE);
 480	pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
 481	if (pk)
 482		pk->pkru = init_pkru_value;
 483	/*
  484	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
 485	 * cpuid bit to be set.  We need to ensure that we
 486	 * update that bit in this CPU's "cpu_info".
 487	 */
 488	set_cpu_cap(c, X86_FEATURE_OSPKE);
 489}
 490
 491#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 492static __init int setup_disable_pku(char *arg)
 493{
 494	/*
 495	 * Do not clear the X86_FEATURE_PKU bit.  All of the
 496	 * runtime checks are against OSPKE so clearing the
 497	 * bit does nothing.
 498	 *
 499	 * This way, we will see "pku" in cpuinfo, but not
 500	 * "ospke", which is exactly what we want.  It shows
 501	 * that the CPU has PKU, but the OS has not enabled it.
 502	 * This happens to be exactly how a system would look
 503	 * if we disabled the config option.
 504	 */
 505	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
 506	pku_disabled = true;
 507	return 1;
 508}
 509__setup("nopku", setup_disable_pku);
  510	#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 511
 512/*
 513 * Some CPU features depend on higher CPUID levels, which may not always
 514 * be available due to CPUID level capping or broken virtualization
 515 * software.  Add those features to this table to auto-disable them.
 516 */
 517struct cpuid_dependent_feature {
 518	u32 feature;
 519	u32 level;
 520};
 521
 522static const struct cpuid_dependent_feature
 523cpuid_dependent_features[] = {
 524	{ X86_FEATURE_MWAIT,		0x00000005 },
 525	{ X86_FEATURE_DCA,		0x00000009 },
 526	{ X86_FEATURE_XSAVE,		0x0000000d },
 527	{ 0, 0 }
 528};
 529
 530static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 531{
 532	const struct cpuid_dependent_feature *df;
 533
 534	for (df = cpuid_dependent_features; df->feature; df++) {
 535
 536		if (!cpu_has(c, df->feature))
 537			continue;
 538		/*
 539		 * Note: cpuid_level is set to -1 if unavailable, but
  540		 * extended_cpuid_level is set to 0 if unavailable
 541		 * and the legitimate extended levels are all negative
 542		 * when signed; hence the weird messing around with
 543		 * signs here...
 544		 */
 545		if (!((s32)df->level < 0 ?
 546		     (u32)df->level > (u32)c->extended_cpuid_level :
 547		     (s32)df->level > (s32)c->cpuid_level))
 548			continue;
 549
 550		clear_cpu_cap(c, df->feature);
 551		if (!warn)
 552			continue;
 553
 554		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
 555			x86_cap_flag(df->feature), df->level);
 556	}
 557}
 558
 559/*
 560 * Naming convention should be: <Name> [(<Codename>)]
  561 * This table is only used if init_<vendor>() below doesn't set it;
 562 * in particular, if CPUID levels 0x80000002..4 are supported, this
 563 * isn't used
 564 */
 565
 566/* Look up CPU names by table lookup. */
 567static const char *table_lookup_model(struct cpuinfo_x86 *c)
 568{
 569#ifdef CONFIG_X86_32
 570	const struct legacy_cpu_model_info *info;
 571
 572	if (c->x86_model >= 16)
 573		return NULL;	/* Range check */
 574
 575	if (!this_cpu)
 576		return NULL;
 577
 578	info = this_cpu->legacy_models;
 579
 580	while (info->family) {
 581		if (info->family == c->x86)
 582			return info->model_names[c->x86_model];
 583		info++;
 584	}
 585#endif
 586	return NULL;		/* Not found */
 587}
 588
 589/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
 590__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
 591__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
 592
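/*
 * Reload the segment that addresses the per-CPU area: %fs on 32-bit;
 * on 64-bit the %gs selector is zeroed and the base comes from
 * MSR_GS_BASE instead.
 */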
 593void load_percpu_segment(int cpu)
 594{
 595#ifdef CONFIG_X86_32
 596	loadsegment(fs, __KERNEL_PERCPU);
 597#else
 598	__loadsegment_simple(gs, 0);
 599	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
 600#endif
 601	load_stack_canary_segment();
 602}
 603
 604#ifdef CONFIG_X86_32
 605/* The 32-bit entry code needs to find cpu_entry_area. */
 606DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 607#endif
 608
 609/* Load the original GDT from the per-cpu structure */
 610void load_direct_gdt(int cpu)
 611{
 612	struct desc_ptr gdt_descr;
 613
 614	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
 615	gdt_descr.size = GDT_SIZE - 1;
 616	load_gdt(&gdt_descr);
 617}
 618EXPORT_SYMBOL_GPL(load_direct_gdt);
 619
 620/* Load a fixmap remapping of the per-cpu GDT */
 621void load_fixmap_gdt(int cpu)
 622{
 623	struct desc_ptr gdt_descr;
 624
 625	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
 626	gdt_descr.size = GDT_SIZE - 1;
 627	load_gdt(&gdt_descr);
 628}
 629EXPORT_SYMBOL_GPL(load_fixmap_gdt);
 630
 631/*
 632 * Current gdt points %fs at the "master" per-cpu area: after this,
 633 * it's on the real one.
 634 */
 635void switch_to_new_gdt(int cpu)
 636{
 637	/* Load the original GDT */
 638	load_direct_gdt(cpu);
 639	/* Reload the per-cpu base */
 640	load_percpu_segment(cpu);
 641}
 642
 643static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 644
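/*
 * Fetch the 48-byte brand string from CPUID leaves 0x80000002..4
 * (three leaves x four 32-bit registers) and strip the leading and
 * trailing whitespace some vendors pad it with.
 */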
 645static void get_model_name(struct cpuinfo_x86 *c)
 646{
 647	unsigned int *v;
 648	char *p, *q, *s;
 649
 650	if (c->extended_cpuid_level < 0x80000004)
 651		return;
 652
 653	v = (unsigned int *)c->x86_model_id;
 654	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 655	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 656	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 657	c->x86_model_id[48] = 0;
 658
 659	/* Trim whitespace */
 660	p = q = s = &c->x86_model_id[0];
 661
 662	while (*p == ' ')
 663		p++;
 664
 665	while (*p) {
 666		/* Note the last non-whitespace index */
 667		if (!isspace(*p))
 668			s = q;
 669
 670		*q++ = *p++;
 671	}
 672
 673	*(s + 1) = '\0';
 674}
 675
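/*
 * On CPUs with CPUID leaf 4 (deterministic cache parameters),
 * EAX[31:26] of sub-leaf 0 encodes the number of addressable core
 * IDs per package minus one, hence the "+ 1" below.
 */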
 676void detect_num_cpu_cores(struct cpuinfo_x86 *c)
 677{
 678	unsigned int eax, ebx, ecx, edx;
 679
 680	c->x86_max_cores = 1;
 681	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
 682		return;
 683
 684	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 685	if (eax & 0x1f)
 686		c->x86_max_cores = (eax >> 26) + 1;
 687}
 688
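/*
 * Derive cache sizes from the AMD-defined extended leaves:
 * 0x80000005 reports the L1 caches (size in KB in bits 31:24 of
 * ECX/EDX), 0x80000006 the L2 cache (size in KB in ECX[31:16]).
 */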
 689void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 690{
 691	unsigned int n, dummy, ebx, ecx, edx, l2size;
 692
 693	n = c->extended_cpuid_level;
 694
 695	if (n >= 0x80000005) {
 696		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
 697		c->x86_cache_size = (ecx>>24) + (edx>>24);
 698#ifdef CONFIG_X86_64
 699		/* On K8 L1 TLB is inclusive, so don't count it */
 700		c->x86_tlbsize = 0;
 701#endif
 702	}
 703
  704	if (n < 0x80000006)	/* Some chips just have a large L1. */
 705		return;
 706
 707	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
 708	l2size = ecx >> 16;
 709
 710#ifdef CONFIG_X86_64
 711	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
 712#else
 713	/* do processor-specific cache resizing */
 714	if (this_cpu->legacy_cache_size)
 715		l2size = this_cpu->legacy_cache_size(c, l2size);
 716
 717	/* Allow user to override all this if necessary. */
 718	if (cachesize_override != -1)
 719		l2size = cachesize_override;
 720
 721	if (l2size == 0)
 722		return;		/* Again, no L2 cache is possible */
 723#endif
 724
 725	c->x86_cache_size = l2size;
 726}
 727
 728u16 __read_mostly tlb_lli_4k[NR_INFO];
 729u16 __read_mostly tlb_lli_2m[NR_INFO];
 730u16 __read_mostly tlb_lli_4m[NR_INFO];
 731u16 __read_mostly tlb_lld_4k[NR_INFO];
 732u16 __read_mostly tlb_lld_2m[NR_INFO];
 733u16 __read_mostly tlb_lld_4m[NR_INFO];
 734u16 __read_mostly tlb_lld_1g[NR_INFO];
 735
 736static void cpu_detect_tlb(struct cpuinfo_x86 *c)
 737{
 738	if (this_cpu->c_detect_tlb)
 739		this_cpu->c_detect_tlb(c);
 740
 741	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
 742		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
 743		tlb_lli_4m[ENTRIES]);
 744
 745	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
 746		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
 747		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 748}
 749
 750int detect_ht_early(struct cpuinfo_x86 *c)
 751{
 752#ifdef CONFIG_SMP
 753	u32 eax, ebx, ecx, edx;
 754
 755	if (!cpu_has(c, X86_FEATURE_HT))
 756		return -1;
 757
 758	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 759		return -1;
 760
 761	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
 762		return -1;
 763
 764	cpuid(1, &eax, &ebx, &ecx, &edx);
 765
 766	smp_num_siblings = (ebx & 0xff0000) >> 16;
 767	if (smp_num_siblings == 1)
 768		pr_info_once("CPU0: Hyper-Threading is disabled\n");
 769#endif
 770	return 0;
 771}
 772
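/*
 * Decompose the initial APIC ID into the SMT/core/package topology:
 * the lowest bits select the thread within a core, the next
 * get_count_order(x86_max_cores) bits the core, and the remaining
 * high bits the physical package ID.
 */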
 773void detect_ht(struct cpuinfo_x86 *c)
 774{
 775#ifdef CONFIG_SMP
 776	int index_msb, core_bits;
 777
 778	if (detect_ht_early(c) < 0)
 779		return;
 780
 781	index_msb = get_count_order(smp_num_siblings);
 782	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 783
 784	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 785
 786	index_msb = get_count_order(smp_num_siblings);
 787
 788	core_bits = get_count_order(c->x86_max_cores);
 789
 790	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 791				       ((1 << core_bits) - 1);
 792#endif
 793}
 794
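/*
 * Match the 12-character CPUID vendor string against the c_ident
 * strings of each registered cpu_dev; unknown vendors fall back to
 * default_cpu with a one-time warning.
 */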
 795static void get_cpu_vendor(struct cpuinfo_x86 *c)
 796{
 797	char *v = c->x86_vendor_id;
 798	int i;
 799
 800	for (i = 0; i < X86_VENDOR_NUM; i++) {
 801		if (!cpu_devs[i])
 802			break;
 803
 804		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 805		    (cpu_devs[i]->c_ident[1] &&
 806		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 807
 808			this_cpu = cpu_devs[i];
 809			c->x86_vendor = this_cpu->c_x86_vendor;
 810			return;
 811		}
 812	}
 813
 814	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
 815		    "CPU: Your system may be unstable.\n", v);
 816
 817	c->x86_vendor = X86_VENDOR_UNKNOWN;
 818	this_cpu = &default_cpu;
 819}
 820
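/*
 * Basic CPUID probe: leaf 0 gives the maximum standard leaf and the
 * vendor string (stored in EBX, EDX, ECX order, hence the shuffled
 * destinations below); leaf 1 gives family/model/stepping and, when
 * EDX bit 19 (CLFLUSH) is set, the cache line size in 8-byte units
 * from EBX[15:8].
 */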
 821void cpu_detect(struct cpuinfo_x86 *c)
 822{
 823	/* Get vendor name */
 824	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
 825	      (unsigned int *)&c->x86_vendor_id[0],
 826	      (unsigned int *)&c->x86_vendor_id[8],
 827	      (unsigned int *)&c->x86_vendor_id[4]);
 828
 829	c->x86 = 4;
 830	/* Intel-defined flags: level 0x00000001 */
 831	if (c->cpuid_level >= 0x00000001) {
 832		u32 junk, tfms, cap0, misc;
 833
 834		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 835		c->x86		= x86_family(tfms);
 836		c->x86_model	= x86_model(tfms);
 837		c->x86_stepping	= x86_stepping(tfms);
 838
 839		if (cap0 & (1<<19)) {
 840			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 841			c->x86_cache_alignment = c->x86_clflush_size;
 842		}
 843	}
 844}
 845
 846static void apply_forced_caps(struct cpuinfo_x86 *c)
 847{
 848	int i;
 849
 850	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
 851		c->x86_capability[i] &= ~cpu_caps_cleared[i];
 852		c->x86_capability[i] |= cpu_caps_set[i];
 853	}
 854}
 855
 856static void init_speculation_control(struct cpuinfo_x86 *c)
 857{
 858	/*
 859	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
 860	 * and they also have a different bit for STIBP support. Also,
 861	 * a hypervisor might have set the individual AMD bits even on
 862	 * Intel CPUs, for finer-grained selection of what's available.
 863	 */
 864	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
 865		set_cpu_cap(c, X86_FEATURE_IBRS);
 866		set_cpu_cap(c, X86_FEATURE_IBPB);
 867		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 868	}
 869
 870	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
 871		set_cpu_cap(c, X86_FEATURE_STIBP);
 872
 873	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
 874	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
 875		set_cpu_cap(c, X86_FEATURE_SSBD);
 876
 877	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
 878		set_cpu_cap(c, X86_FEATURE_IBRS);
 879		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 880	}
 881
 882	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
 883		set_cpu_cap(c, X86_FEATURE_IBPB);
 884
 885	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
 886		set_cpu_cap(c, X86_FEATURE_STIBP);
 887		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 888	}
 889
 890	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
 891		set_cpu_cap(c, X86_FEATURE_SSBD);
 892		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 893		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
 894	}
 895}
 896
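/*
 * Fill c->x86_capability[] word by word from the standard and
 * extended CPUID leaves, then fold in scattered/synthetic bits and
 * re-apply any command-line forced capabilities.
 */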
 897void get_cpu_cap(struct cpuinfo_x86 *c)
 898{
 899	u32 eax, ebx, ecx, edx;
 900
 901	/* Intel-defined flags: level 0x00000001 */
 902	if (c->cpuid_level >= 0x00000001) {
 903		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
 904
 905		c->x86_capability[CPUID_1_ECX] = ecx;
 906		c->x86_capability[CPUID_1_EDX] = edx;
 907	}
 908
 909	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
 910	if (c->cpuid_level >= 0x00000006)
 911		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
 912
 913	/* Additional Intel-defined flags: level 0x00000007 */
 914	if (c->cpuid_level >= 0x00000007) {
 915		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
 916		c->x86_capability[CPUID_7_0_EBX] = ebx;
 917		c->x86_capability[CPUID_7_ECX] = ecx;
 918		c->x86_capability[CPUID_7_EDX] = edx;
 919
 920		/* Check valid sub-leaf index before accessing it */
 921		if (eax >= 1) {
 922			cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
 923			c->x86_capability[CPUID_7_1_EAX] = eax;
 924		}
 925	}
 926
 927	/* Extended state features: level 0x0000000d */
 928	if (c->cpuid_level >= 0x0000000d) {
 929		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
 930
 931		c->x86_capability[CPUID_D_1_EAX] = eax;
 932	}
 933
 934	/* AMD-defined flags: level 0x80000001 */
 935	eax = cpuid_eax(0x80000000);
 936	c->extended_cpuid_level = eax;
 937
 938	if ((eax & 0xffff0000) == 0x80000000) {
 939		if (eax >= 0x80000001) {
 940			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
 941
 942			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
 943			c->x86_capability[CPUID_8000_0001_EDX] = edx;
 944		}
 945	}
 946
 947	if (c->extended_cpuid_level >= 0x80000007) {
 948		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
 949
 950		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
 951		c->x86_power = edx;
 952	}
 953
 954	if (c->extended_cpuid_level >= 0x80000008) {
 955		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
 956		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
 957	}
 958
 959	if (c->extended_cpuid_level >= 0x8000000a)
 960		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 961
 962	init_scattered_cpuid_features(c);
 963	init_speculation_control(c);
 964
 965	/*
 966	 * Clear/Set all flags overridden by options, after probe.
 967	 * This needs to happen each time we re-probe, which may happen
 968	 * several times during CPU initialization.
 969	 */
 970	apply_forced_caps(c);
 971}
 972
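/*
 * CPUID leaf 0x80000008 reports the physical address width in
 * EAX[7:0] and the linear address width in EAX[15:8]; without it,
 * 32-bit kernels assume 36 bits of physical address space when PAE
 * or PSE36 is present.
 */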
 973void get_cpu_address_sizes(struct cpuinfo_x86 *c)
 974{
 975	u32 eax, ebx, ecx, edx;
 976
 977	if (c->extended_cpuid_level >= 0x80000008) {
 978		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
 979
 980		c->x86_virt_bits = (eax >> 8) & 0xff;
 981		c->x86_phys_bits = eax & 0xff;
 982	}
 983#ifdef CONFIG_X86_32
 984	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
 985		c->x86_phys_bits = 36;
 986#endif
 987	c->x86_cache_bits = c->x86_phys_bits;
 988}
 989
 990static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 991{
 992#ifdef CONFIG_X86_32
 993	int i;
 994
 995	/*
 996	 * First of all, decide if this is a 486 or higher
 997	 * It's a 486 if we can modify the AC flag
 998	 */
 999	if (flag_is_changeable_p(X86_EFLAGS_AC))
1000		c->x86 = 4;
1001	else
1002		c->x86 = 3;
1003
1004	for (i = 0; i < X86_VENDOR_NUM; i++)
1005		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
1006			c->x86_vendor_id[0] = 0;
1007			cpu_devs[i]->c_identify(c);
1008			if (c->x86_vendor_id[0]) {
1009				get_cpu_vendor(c);
1010				break;
1011			}
1012		}
1013#endif
1014}
1015
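/*
 * Whitelist bits for cpu_vuln_whitelist below: each flag marks a
 * vulnerability class the matched CPUs are known *not* to have, so
 * the corresponding X86_BUG_* bit is not forced on in
 * cpu_set_bug_bits().
 */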
1016#define NO_SPECULATION		BIT(0)
1017#define NO_MELTDOWN		BIT(1)
1018#define NO_SSB			BIT(2)
1019#define NO_L1TF			BIT(3)
1020#define NO_MDS			BIT(4)
1021#define MSBDS_ONLY		BIT(5)
1022#define NO_SWAPGS		BIT(6)
1023#define NO_ITLB_MULTIHIT	BIT(7)
1024#define NO_SPECTRE_V2		BIT(8)
1025
1026#define VULNWL(vendor, family, model, whitelist)	\
1027	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
1028
1029#define VULNWL_INTEL(model, whitelist)		\
1030	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
1031
1032#define VULNWL_AMD(family, whitelist)		\
1033	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
1034
1035#define VULNWL_HYGON(family, whitelist)		\
1036	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
1037
1038static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
1039	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
1040	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
1041	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
1042	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
1043
1044	/* Intel Family 6 */
1045	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1046	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
1047	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1048	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1049	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1050
1051	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1052	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1053	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1054	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1055	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1056	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1057
1058	VULNWL_INTEL(CORE_YONAH,		NO_SSB),
1059
1060	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1061	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
1062
1063	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
1064	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
1065	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
1066
1067	/*
1068	 * Technically, swapgs isn't serializing on AMD (despite it previously
1069	 * being documented as such in the APM).  But according to AMD, %gs is
1070	 * updated non-speculatively, and the issuing of %gs-relative memory
1071	 * operands will be blocked until the %gs update completes, which is
1072	 * good enough for our purposes.
1073	 */
1074
1075	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT),
1076
1077	/* AMD Family 0xf - 0x12 */
1078	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1079	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1080	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1081	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1082
1083	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1084	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1085	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1086
1087	/* Zhaoxin Family 7 */
1088	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS),
1089	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS),
1090	{}
1091};
1092
1093#define VULNBL_INTEL_STEPPINGS(model, steppings, issues)		   \
1094	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,		   \
1095					    INTEL_FAM6_##model, steppings, \
1096					    X86_FEATURE_ANY, issues)
1097
1098#define SRBDS		BIT(0)
1099
1100static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
1101	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
1102	VULNBL_INTEL_STEPPINGS(HASWELL,		X86_STEPPING_ANY,		SRBDS),
1103	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,		SRBDS),
1104	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,		SRBDS),
1105	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
1106	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
1107	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS),
1108	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS),
1109	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPINGS(0x0, 0xC),	SRBDS),
1110	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPINGS(0x0, 0xD),	SRBDS),
1111	{}
1112};
1113
1114static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
1115{
1116	const struct x86_cpu_id *m = x86_match_cpu(table);
1117
1118	return m && !!(m->driver_data & which);
1119}
1120
1121u64 x86_read_arch_cap_msr(void)
1122{
1123	u64 ia32_cap = 0;
1124
1125	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
1126		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
1127
1128	return ia32_cap;
1129}
1130
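/*
 * Force the X86_BUG_* bits for the boot CPU based on the whitelist
 * and blacklist tables above, combined with what the CPU itself
 * claims via the IA32_ARCH_CAPABILITIES MSR.
 */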
1131static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1132{
1133	u64 ia32_cap = x86_read_arch_cap_msr();
1134
1135	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
1136	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
1137	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
1138		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
1139
1140	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
1141		return;
1142
1143	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1144
1145	if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
1146		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1147
1148	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
1149	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
1150	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
1151		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1152
1153	if (ia32_cap & ARCH_CAP_IBRS_ALL)
1154		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1155
1156	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
1157	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
1158		setup_force_cpu_bug(X86_BUG_MDS);
1159		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
1160			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
1161	}
1162
1163	if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
1164		setup_force_cpu_bug(X86_BUG_SWAPGS);
1165
1166	/*
1167	 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
1168	 *	- TSX is supported or
1169	 *	- TSX_CTRL is present
1170	 *
 1171	 * The TSX_CTRL check is needed for cases when TSX could be disabled
 1172	 * before kernel boot, e.g. by kexec.
 1173	 * The TSX_CTRL check alone is not sufficient when the microcode update
 1174	 * is not present, or when running as a guest that doesn't get TSX_CTRL.
1175	 */
1176	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
1177	    (cpu_has(c, X86_FEATURE_RTM) ||
1178	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
1179		setup_force_cpu_bug(X86_BUG_TAA);
1180
1181	/*
1182	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
1183	 * in the vulnerability blacklist.
1184	 */
1185	if ((cpu_has(c, X86_FEATURE_RDRAND) ||
1186	     cpu_has(c, X86_FEATURE_RDSEED)) &&
1187	    cpu_matches(cpu_vuln_blacklist, SRBDS))
1188		    setup_force_cpu_bug(X86_BUG_SRBDS);
1189
1190	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
1191		return;
1192
1193	/* Rogue Data Cache Load? No! */
1194	if (ia32_cap & ARCH_CAP_RDCL_NO)
1195		return;
1196
1197	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1198
1199	if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
1200		return;
1201
1202	setup_force_cpu_bug(X86_BUG_L1TF);
1203}
1204
1205/*
1206 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
1207 * unfortunately, that's not true in practice because of early VIA
1208 * chips and (more importantly) broken virtualizers that are not easy
1209 * to detect. In the latter case it doesn't even *fail* reliably, so
 1210 * probing for it is not reliable either. Disable it completely on 32-bit
1211 * unless we can find a reliable way to detect all the broken cases.
1212 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1213 */
1214static void detect_nopl(void)
1215{
1216#ifdef CONFIG_X86_32
1217	setup_clear_cpu_cap(X86_FEATURE_NOPL);
1218#else
1219	setup_force_cpu_cap(X86_FEATURE_NOPL);
1220#endif
1221}
1222
1223/*
1224 * Do minimum CPU detection early.
1225 * Fields really needed: vendor, cpuid_level, family, model, mask,
1226 * cache alignment.
1227 * The others are not touched to avoid unwanted side effects.
1228 *
1229 * WARNING: this function is only called on the boot CPU.  Don't add code
1230 * here that is supposed to run on all CPUs.
1231 */
1232static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1233{
1234#ifdef CONFIG_X86_64
1235	c->x86_clflush_size = 64;
1236	c->x86_phys_bits = 36;
1237	c->x86_virt_bits = 48;
1238#else
1239	c->x86_clflush_size = 32;
1240	c->x86_phys_bits = 32;
1241	c->x86_virt_bits = 32;
1242#endif
1243	c->x86_cache_alignment = c->x86_clflush_size;
1244
1245	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1246	c->extended_cpuid_level = 0;
1247
1248	if (!have_cpuid_p())
1249		identify_cpu_without_cpuid(c);
1250
1251	/* cyrix could have cpuid enabled via c_identify()*/
1252	if (have_cpuid_p()) {
1253		cpu_detect(c);
1254		get_cpu_vendor(c);
1255		get_cpu_cap(c);
1256		get_cpu_address_sizes(c);
1257		setup_force_cpu_cap(X86_FEATURE_CPUID);
1258
1259		if (this_cpu->c_early_init)
1260			this_cpu->c_early_init(c);
1261
1262		c->cpu_index = 0;
1263		filter_cpuid_features(c, false);
1264
1265		if (this_cpu->c_bsp_init)
1266			this_cpu->c_bsp_init(c);
1267	} else {
1268		setup_clear_cpu_cap(X86_FEATURE_CPUID);
1269	}
1270
1271	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1272
1273	cpu_set_bug_bits(c);
1274
1275	cpu_set_core_cap_bits(c);
1276
1277	fpu__init_system(c);
1278
1279#ifdef CONFIG_X86_32
1280	/*
1281	 * Regardless of whether PCID is enumerated, the SDM says
1282	 * that it can't be enabled in 32-bit mode.
1283	 */
1284	setup_clear_cpu_cap(X86_FEATURE_PCID);
1285#endif
1286
1287	/*
1288	 * Later in the boot process pgtable_l5_enabled() relies on
1289	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
1290	 * enabled by this point we need to clear the feature bit to avoid
1291	 * false-positives at the later stage.
1292	 *
1293	 * pgtable_l5_enabled() can be false here for several reasons:
1294	 *  - 5-level paging is disabled compile-time;
1295	 *  - it's 32-bit kernel;
1296	 *  - machine doesn't support 5-level paging;
1297	 *  - user specified 'no5lvl' in kernel command line.
1298	 */
1299	if (!pgtable_l5_enabled())
1300		setup_clear_cpu_cap(X86_FEATURE_LA57);
1301
1302	detect_nopl();
1303}
1304
1305void __init early_cpu_init(void)
1306{
1307	const struct cpu_dev *const *cdev;
1308	int count = 0;
1309
1310#ifdef CONFIG_PROCESSOR_SELECT
1311	pr_info("KERNEL supported cpus:\n");
1312#endif
1313
1314	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
1315		const struct cpu_dev *cpudev = *cdev;
1316
1317		if (count >= X86_VENDOR_NUM)
1318			break;
1319		cpu_devs[count] = cpudev;
1320		count++;
1321
1322#ifdef CONFIG_PROCESSOR_SELECT
1323		{
1324			unsigned int j;
1325
1326			for (j = 0; j < 2; j++) {
1327				if (!cpudev->c_ident[j])
1328					continue;
1329				pr_info("  %s %s\n", cpudev->c_vendor,
1330					cpudev->c_ident[j]);
1331			}
1332		}
1333#endif
1334	}
1335	early_identify_cpu(&boot_cpu_data);
1336}
1337
1338static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
1339{
1340#ifdef CONFIG_X86_64
1341	/*
1342	 * Empirically, writing zero to a segment selector on AMD does
1343	 * not clear the base, whereas writing zero to a segment
1344	 * selector on Intel does clear the base.  Intel's behavior
1345	 * allows slightly faster context switches in the common case
1346	 * where GS is unused by the prev and next threads.
1347	 *
1348	 * Since neither vendor documents this anywhere that I can see,
1349	 * detect it directly instead of hardcoding the choice by
1350	 * vendor.
1351	 *
1352	 * I've designated AMD's behavior as the "bug" because it's
1353	 * counterintuitive and less friendly.
1354	 */
1355
1356	unsigned long old_base, tmp;
1357	rdmsrl(MSR_FS_BASE, old_base);
1358	wrmsrl(MSR_FS_BASE, 1);
1359	loadsegment(fs, 0);
1360	rdmsrl(MSR_FS_BASE, tmp);
1361	if (tmp != 0)
1362		set_cpu_bug(c, X86_BUG_NULL_SEG);
1363	wrmsrl(MSR_FS_BASE, old_base);
1364#endif
1365}
1366
1367static void generic_identify(struct cpuinfo_x86 *c)
1368{
1369	c->extended_cpuid_level = 0;
1370
1371	if (!have_cpuid_p())
1372		identify_cpu_without_cpuid(c);
1373
1374	/* cyrix could have cpuid enabled via c_identify()*/
1375	if (!have_cpuid_p())
1376		return;
1377
1378	cpu_detect(c);
1379
1380	get_cpu_vendor(c);
1381
1382	get_cpu_cap(c);
1383
1384	get_cpu_address_sizes(c);
1385
1386	if (c->cpuid_level >= 0x00000001) {
1387		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
1388#ifdef CONFIG_X86_32
1389# ifdef CONFIG_SMP
1390		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1391# else
1392		c->apicid = c->initial_apicid;
1393# endif
1394#endif
1395		c->phys_proc_id = c->initial_apicid;
1396	}
1397
1398	get_model_name(c); /* Default name */
1399
1400	detect_null_seg_behavior(c);
1401
1402	/*
1403	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
1404	 * systems that run Linux at CPL > 0 may or may not have the
1405	 * issue, but, even if they have the issue, there's absolutely
1406	 * nothing we can do about it because we can't use the real IRET
1407	 * instruction.
1408	 *
1409	 * NB: For the time being, only 32-bit kernels support
1410	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
1411	 * whether to apply espfix using paravirt hooks.  If any
1412	 * non-paravirt system ever shows up that does *not* have the
1413	 * ESPFIX issue, we can change this.
1414	 */
1415#ifdef CONFIG_X86_32
1416# ifdef CONFIG_PARAVIRT_XXL
1417	do {
1418		extern void native_iret(void);
1419		if (pv_ops.cpu.iret == native_iret)
1420			set_cpu_bug(c, X86_BUG_ESPFIX);
1421	} while (0);
1422# else
1423	set_cpu_bug(c, X86_BUG_ESPFIX);
1424# endif
1425#endif
1426}
1427
1428/*
1429 * Validate that ACPI/mptables have the same information about the
1430 * effective APIC id and update the package map.
1431 */
1432static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
1433{
1434#ifdef CONFIG_SMP
1435	unsigned int apicid, cpu = smp_processor_id();
1436
1437	apicid = apic->cpu_present_to_apicid(cpu);
1438
1439	if (apicid != c->apicid) {
1440		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
1441		       cpu, apicid, c->initial_apicid);
1442	}
1443	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
1444	BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
1445#else
1446	c->logical_proc_id = 0;
1447#endif
1448}
1449
1450/*
1451 * This does the hard work of actually picking apart the CPU stuff...
1452 */
1453static void identify_cpu(struct cpuinfo_x86 *c)
1454{
1455	int i;
1456
1457	c->loops_per_jiffy = loops_per_jiffy;
1458	c->x86_cache_size = 0;
1459	c->x86_vendor = X86_VENDOR_UNKNOWN;
1460	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
1461	c->x86_vendor_id[0] = '\0'; /* Unset */
1462	c->x86_model_id[0] = '\0';  /* Unset */
1463	c->x86_max_cores = 1;
1464	c->x86_coreid_bits = 0;
1465	c->cu_id = 0xff;
1466#ifdef CONFIG_X86_64
1467	c->x86_clflush_size = 64;
1468	c->x86_phys_bits = 36;
1469	c->x86_virt_bits = 48;
1470#else
1471	c->cpuid_level = -1;	/* CPUID not detected */
1472	c->x86_clflush_size = 32;
1473	c->x86_phys_bits = 32;
1474	c->x86_virt_bits = 32;
1475#endif
1476	c->x86_cache_alignment = c->x86_clflush_size;
1477	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1478#ifdef CONFIG_X86_VMX_FEATURE_NAMES
1479	memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
1480#endif
1481
1482	generic_identify(c);
1483
1484	if (this_cpu->c_identify)
1485		this_cpu->c_identify(c);
1486
1487	/* Clear/Set all flags overridden by options, after probe */
1488	apply_forced_caps(c);
1489
1490#ifdef CONFIG_X86_64
1491	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1492#endif
1493
1494	/*
1495	 * Vendor-specific initialization.  In this section we
1496	 * canonicalize the feature flags, meaning if there are
1497	 * features a certain CPU supports which CPUID doesn't
1498	 * tell us, CPUID claiming incorrect flags, or other bugs,
1499	 * we handle them here.
1500	 *
1501	 * At the end of this section, c->x86_capability better
1502	 * indicate the features this CPU genuinely supports!
1503	 */
1504	if (this_cpu->c_init)
1505		this_cpu->c_init(c);
1506
1507	/* Disable the PN if appropriate */
1508	squash_the_stupid_serial_number(c);
1509
1510	/* Set up SMEP/SMAP/UMIP */
1511	setup_smep(c);
1512	setup_smap(c);
1513	setup_umip(c);
1514
1515	/* Enable FSGSBASE instructions if available. */
1516	if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
1517		cr4_set_bits(X86_CR4_FSGSBASE);
1518		elf_hwcap2 |= HWCAP2_FSGSBASE;
1519	}
1520
1521	/*
1522	 * The vendor-specific functions might have changed features.
1523	 * Now we do "generic changes."
1524	 */
1525
1526	/* Filter out anything that depends on CPUID levels we don't have */
1527	filter_cpuid_features(c, true);
1528
1529	/* If the model name is still unset, do table lookup. */
1530	if (!c->x86_model_id[0]) {
1531		const char *p;
1532		p = table_lookup_model(c);
1533		if (p)
1534			strcpy(c->x86_model_id, p);
1535		else
1536			/* Last resort... */
1537			sprintf(c->x86_model_id, "%02x/%02x",
1538				c->x86, c->x86_model);
1539	}
1540
1541#ifdef CONFIG_X86_64
1542	detect_ht(c);
1543#endif
1544
1545	x86_init_rdrand(c);
1546	setup_pku(c);
1547
1548	/*
1549	 * Clear/Set all flags overridden by options, need do it
1550	 * before following smp all cpus cap AND.
1551	 */
1552	apply_forced_caps(c);
1553
1554	/*
1555	 * On SMP, boot_cpu_data holds the common feature set between
1556	 * all CPUs; so make sure that we indicate which features are
1557	 * common between the CPUs.  The first time this routine gets
1558	 * executed, c == &boot_cpu_data.
1559	 */
1560	if (c != &boot_cpu_data) {
1561		/* AND the already accumulated flags with these */
1562		for (i = 0; i < NCAPINTS; i++)
1563			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1564
1565		/* OR, i.e. replicate the bug flags */
1566		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
1567			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1568	}
1569
1570	/* Init Machine Check Exception if available. */
1571	mcheck_cpu_init(c);
1572
1573	select_idle_routine(c);
1574
1575#ifdef CONFIG_NUMA
1576	numa_add_cpu(smp_processor_id());
1577#endif
1578}
1579
1580/*
1581 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
1582 * on 32-bit kernels:
1583 */
1584#ifdef CONFIG_X86_32
1585void enable_sep_cpu(void)
1586{
1587	struct tss_struct *tss;
1588	int cpu;
1589
1590	if (!boot_cpu_has(X86_FEATURE_SEP))
1591		return;
1592
1593	cpu = get_cpu();
1594	tss = &per_cpu(cpu_tss_rw, cpu);
1595
1596	/*
1597	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1598	 * see the big comment in struct x86_hw_tss's definition.
1599	 */
1600
1601	tss->x86_tss.ss1 = __KERNEL_CS;
1602	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
1603	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
1604	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
1605
1606	put_cpu();
1607}
1608#endif
1609
1610void __init identify_boot_cpu(void)
1611{
1612	identify_cpu(&boot_cpu_data);
1613#ifdef CONFIG_X86_32
1614	sysenter_setup();
1615	enable_sep_cpu();
1616#endif
1617	cpu_detect_tlb(&boot_cpu_data);
1618	setup_cr_pinning();
1619
1620	tsx_init();
1621}
1622
1623void identify_secondary_cpu(struct cpuinfo_x86 *c)
1624{
1625	BUG_ON(c == &boot_cpu_data);
1626	identify_cpu(c);
1627#ifdef CONFIG_X86_32
1628	enable_sep_cpu();
1629#endif
1630	mtrr_ap_init();
1631	validate_apic_and_package_id(c);
1632	x86_spec_ctrl_setup_ap();
1633	update_srbds_msr();
1634}
1635
1636static __init int setup_noclflush(char *arg)
1637{
1638	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
1639	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
1640	return 1;
1641}
1642__setup("noclflush", setup_noclflush);
1643
1644void print_cpu_info(struct cpuinfo_x86 *c)
1645{
1646	const char *vendor = NULL;
1647
1648	if (c->x86_vendor < X86_VENDOR_NUM) {
1649		vendor = this_cpu->c_vendor;
1650	} else {
1651		if (c->cpuid_level >= 0)
1652			vendor = c->x86_vendor_id;
1653	}
1654
1655	if (vendor && !strstr(c->x86_model_id, vendor))
1656		pr_cont("%s ", vendor);
1657
1658	if (c->x86_model_id[0])
1659		pr_cont("%s", c->x86_model_id);
1660	else
1661		pr_cont("%d86", c->x86);
1662
1663	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
1664
1665	if (c->x86_stepping || c->cpuid_level >= 0)
1666		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
1667	else
1668		pr_cont(")\n");
1669}
1670
1671/*
1672 * clearcpuid= was already parsed in fpu__init_parse_early_param.
1673 * But we need to keep a dummy __setup around otherwise it would
1674 * show up as an environment variable for init.
1675 */
1676static __init int setup_clearcpuid(char *arg)
1677{
1678	return 1;
1679}
1680__setup("clearcpuid=", setup_clearcpuid);
1681
1682#ifdef CONFIG_X86_64
1683DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
1684		     fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
1685EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
1686
1687/*
1688 * The following percpu variables are hot.  Align current_task to
1689 * cacheline size such that they fall in the same cacheline.
1690 */
1691DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
1692	&init_task;
1693EXPORT_PER_CPU_SYMBOL(current_task);
1694
1695DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
1696DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
1697
1698DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1699EXPORT_PER_CPU_SYMBOL(__preempt_count);
1700
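/*
 * MSR_STAR carries the kernel CS for SYSCALL in bits 47:32 and the
 * base of the user segment pair for SYSRET in bits 63:48; MSR_LSTAR
 * holds the 64-bit SYSCALL entry point and MSR_SYSCALL_MASK the
 * RFLAGS bits to clear on entry.
 */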
1701/* May not be marked __init: used by software suspend */
1702void syscall_init(void)
1703{
1704	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
1705	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
1706
1707#ifdef CONFIG_IA32_EMULATION
1708	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
1709	/*
1710	 * This only works on Intel CPUs.
 1711	 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
1712	 * This does not cause SYSENTER to jump to the wrong location, because
1713	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
1714	 */
1715	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
1716	wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
1717		    (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
1718	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
1719#else
1720	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
1721	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
1722	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
1723	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
1724#endif
1725
1726	/* Flags to clear on syscall */
1727	wrmsrl(MSR_SYSCALL_MASK,
1728	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
1729	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
1730}
1731
1732#else	/* CONFIG_X86_64 */
1733
1734DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
1735EXPORT_PER_CPU_SYMBOL(current_task);
1736DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1737EXPORT_PER_CPU_SYMBOL(__preempt_count);
1738
1739/*
1740 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
1741 * the top of the kernel stack.  Use an extra percpu variable to track the
1742 * top of the kernel stack directly.
1743 */
1744DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
1745	(unsigned long)&init_thread_union + THREAD_SIZE;
1746EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
1747
1748#ifdef CONFIG_STACKPROTECTOR
1749DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
1750#endif
1751
1752#endif	/* CONFIG_X86_64 */
1753
1754/*
 1755 * Clear all 6 debug registers (DR0-DR3, DR6, DR7); DR4/DR5 are aliases:
1756 */
1757static void clear_all_debug_regs(void)
1758{
1759	int i;
1760
1761	for (i = 0; i < 8; i++) {
1762		/* Ignore db4, db5 */
1763		if ((i == 4) || (i == 5))
1764			continue;
1765
1766		set_debugreg(0, i);
1767	}
1768}
1769
1770#ifdef CONFIG_KGDB
1771/*
1772 * Restore debug regs if using kgdbwait and you have a kernel debugger
1773 * connection established.
1774 */
1775static void dbg_restore_debug_regs(void)
1776{
1777	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
1778		arch_kgdb_ops.correct_hw_break();
1779}
1780#else /* ! CONFIG_KGDB */
1781#define dbg_restore_debug_regs()
1782#endif /* ! CONFIG_KGDB */
1783
1784static void wait_for_master_cpu(int cpu)
1785{
1786#ifdef CONFIG_SMP
1787	/*
1788	 * wait for ACK from master CPU before continuing
1789	 * with AP initialization
1790	 */
1791	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
1792	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
1793		cpu_relax();
1794#endif
1795}
1796
1797#ifdef CONFIG_X86_64
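/*
 * Publish the CPU and node numbers to user space: write them to
 * TSC_AUX for RDTSCP/RDPID and encode the same value into the limit
 * of the user-readable GDT_ENTRY_CPUNODE segment, where the vDSO can
 * retrieve it with LSL.
 */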
1798static inline void setup_getcpu(int cpu)
1799{
1800	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
1801	struct desc_struct d = { };
1802
1803	if (boot_cpu_has(X86_FEATURE_RDTSCP))
1804		write_rdtscp_aux(cpudata);
1805
1806	/* Store CPU and node number in limit. */
1807	d.limit0 = cpudata;
1808	d.limit1 = cpudata >> 16;
1809
1810	d.type = 5;		/* RO data, expand down, accessed */
1811	d.dpl = 3;		/* Visible to user code */
1812	d.s = 1;		/* Not a system segment */
1813	d.p = 1;		/* Present */
1814	d.d = 1;		/* 32-bit */
1815
1816	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
1817}
1818
1819static inline void ucode_cpu_init(int cpu)
1820{
1821	if (cpu)
1822		load_ucode_ap();
1823}
1824
1825static inline void tss_setup_ist(struct tss_struct *tss)
1826{
1827	/* Set up the per-CPU TSS IST stacks */
1828	tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
1829	tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
1830	tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
1831	tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
1832}
1833
1834#else /* CONFIG_X86_64 */
1835
1836static inline void setup_getcpu(int cpu) { }
1837
1838static inline void ucode_cpu_init(int cpu)
1839{
1840	show_ucode_info_early();
1841}
1842
1843static inline void tss_setup_ist(struct tss_struct *tss) { }
1844
1845#endif /* !CONFIG_X86_64 */
1846
1847static inline void tss_setup_io_bitmap(struct tss_struct *tss)
1848{
1849	tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
1850
1851#ifdef CONFIG_X86_IOPL_IOPERM
1852	tss->io_bitmap.prev_max = 0;
1853	tss->io_bitmap.prev_sequence = 0;
1854	memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
1855	/*
 1856	 * Invalidate the extra array entry past the end of the
 1857	 * all-permission bitmap as required by the hardware.
1858	 */
1859	tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
1860#endif
1861}
1862
1863/*
1864 * cpu_init() initializes state that is per-CPU. Some data is already
1865 * initialized (naturally) in the bootstrap process, such as the GDT
1866 * and IDT. We reload them nevertheless, this function acts as a
1867 * 'CPU state barrier', nothing should get across.
1868 */
1869void cpu_init(void)
1870{
1871	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
1872	struct task_struct *cur = current;
1873	int cpu = raw_smp_processor_id();
1874
1875	wait_for_master_cpu(cpu);
1876
1877	ucode_cpu_init(cpu);
1878
1879#ifdef CONFIG_NUMA
1880	if (this_cpu_read(numa_node) == 0 &&
1881	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
1882		set_numa_node(early_cpu_to_node(cpu));
1883#endif
1884	setup_getcpu(cpu);
1885
1886	pr_debug("Initializing CPU#%d\n", cpu);
1887
1888	if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
1889	    boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
1890		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1891
1892	/*
1893	 * Initialize the per-CPU GDT with the boot GDT,
1894	 * and set up the GDT descriptor:
1895	 */
1896	switch_to_new_gdt(cpu);
1897	load_current_idt();
1898
1899	if (IS_ENABLED(CONFIG_X86_64)) {
1900		loadsegment(fs, 0);
1901		memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
1902		syscall_init();
1903
1904		wrmsrl(MSR_FS_BASE, 0);
1905		wrmsrl(MSR_KERNEL_GS_BASE, 0);
1906		barrier();
1907
1908		x2apic_setup();
1909	}
1910
1911	mmgrab(&init_mm);
1912	cur->active_mm = &init_mm;
1913	BUG_ON(cur->mm);
1914	initialize_tlbstate_and_flush();
1915	enter_lazy_tlb(&init_mm, cur);
1916
1917	/* Initialize the TSS. */
1918	tss_setup_ist(tss);
1919	tss_setup_io_bitmap(tss);
1920	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
1921
1922	load_TR_desc();
1923	/*
1924	 * sp0 points to the entry trampoline stack regardless of what task
1925	 * is running.
1926	 */
1927	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
1928
1929	load_mm_ldt(&init_mm);
1930
1931	clear_all_debug_regs();
1932	dbg_restore_debug_regs();
1933
1934	doublefault_init_cpu_tss();
1935
1936	fpu__init_cpu();
1937
1938	if (is_uv_system())
1939		uv_cpu_init();
1940
1941	load_fixmap_gdt(cpu);
1942}
1943
1944/*
1945 * The microcode loader calls this upon late microcode load to recheck features,
1946 * only when microcode has been updated. Caller holds microcode_mutex and CPU
1947 * hotplug lock.
1948 */
1949void microcode_check(void)
1950{
1951	struct cpuinfo_x86 info;
1952
1953	perf_check_microcode();
1954
1955	/* Reload CPUID max function as it might've changed. */
1956	info.cpuid_level = cpuid_eax(0);
1957
1958	/*
1959	 * Copy all capability leafs to pick up the synthetic ones so that
1960	 * memcmp() below doesn't fail on that. The ones coming from CPUID will
1961	 * get overwritten in get_cpu_cap().
1962	 */
1963	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
1964
1965	get_cpu_cap(&info);
1966
1967	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
1968		return;
1969
1970	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
1971	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
1972}
1973
1974/*
1975 * Invoked from core CPU hotplug code after hotplug operations
1976 */
1977void arch_smt_update(void)
1978{
1979	/* Handle the speculative execution misfeatures */
1980	cpu_bugs_smt_update();
1981	/* Check whether IPI broadcasting can be enabled */
1982	apic_smt_update();
1983}