v4.17
#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

u32 elf_hwcap2 __read_mostly;

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
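	/*
	 * Decoding the GDT_ENTRY_INIT() flag words, for reference: the low
	 * byte is the descriptor access byte and bits 15:12 hold the
	 * G/D-B/L/AVL flag nibble.  0xc09b = G (4K granularity) and D/B
	 * (32-bit) with access byte 0x9b (present, DPL 0, execute/read
	 * code, accessed); 0xa09b uses L (long mode) instead of D/B;
	 * 0x93 is a read/write data access byte; the 0xfb/0xf3 variants
	 * are the same with DPL 3 for user segments.
	 */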
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int __init x86_mpx_setup(char *s)
{
	/* require an exact match without trailing characters */
	if (strlen(s))
		return 0;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_MPX))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_MPX);
	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
	return 1;
}
__setup("nompx", x86_mpx_setup);

#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{
	/* nopcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_PCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_PCID);
	pr_info("nopcid: PCID feature disabled\n");
	return 0;
}
early_param("nopcid", x86_nopcid_setup);
#endif

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
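	/*
	 * Step by step: save EFLAGS twice, pop one copy into f1 and
	 * duplicate it into f2, flip the requested bit in f1 and write it
	 * back to EFLAGS, re-read EFLAGS into f1, then restore the
	 * original value from the remaining saved copy.  If f1 and f2 now
	 * differ in that bit, the flag is changeable (and, for
	 * X86_EFLAGS_ID, CPUID exists).
	 */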
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	/* Check for things that depend on SMEP being enabled: */
	check_mpx_erratum(&boot_cpu_data);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);
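	/*
	 * Once CR4.SMAP is set, EFLAGS.AC doubles as the kernel's override
	 * for accessing user pages (toggled via stac/clac), which is why
	 * it must not already be set here.
	 */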

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}

static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
	/* Check the boot processor, plus build option for UMIP. */
	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
		goto out;

	/* Check the current processor's cpuid bits. */
	if (!cpu_has(c, X86_FEATURE_UMIP))
		goto out;

	cr4_set_bits(X86_CR4_UMIP);

	pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n");

	return;

out:
	/*
	 * Make sure UMIP is disabled in case it was enabled in a
	 * previous boot (e.g., via kexec).
	 */
	cr4_clear_bits(X86_CR4_UMIP);
}

/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	/* check the boot processor, plus compile options for PKU: */
	if (!cpu_feature_enabled(X86_FEATURE_PKU))
		return;
	/* checks the actual processor's cpuid bits: */
	if (!cpu_has(c, X86_FEATURE_PKU))
		return;
	if (pku_disabled)
		return;

	cr4_set_bits(X86_CR4_PKE);
	/*
	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
	 * cpuid bit to be set.  We need to ensure that we
	 * update that bit in this CPU's "cpu_info".
	 */
	get_cpu_cap(c);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
	/*
	 * Do not clear the X86_FEATURE_PKU bit.  All of the
	 * runtime checks are against OSPKE so clearing the
	 * bit does nothing.
	 *
	 * This way, we will see "pku" in cpuinfo, but not
	 * "ospke", which is exactly what we want.  It shows
	 * that the CPU has PKU, but the OS has not enabled it.
	 * This happens to be exactly how a system would look
	 * if we disabled the config option.
	 */
	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
	pku_disabled = true;
	return 1;
}
__setup("nopku", setup_disable_pku);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the name;
 * in particular, if CPUID levels 0x80000002..4 are supported, this
 * isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
__u32 cpu_caps_set[NCAPINTS + NBUGINTS];

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	__loadsegment_simple(gs, 0);
	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#endif
	load_stack_canary_segment();
}

#ifdef CONFIG_X86_32
/* The 32-bit entry code needs to find cpu_entry_area. */
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif

#ifdef CONFIG_X86_64
/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry. Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
};
#endif

/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_direct_gdt);

/* Load a fixmap remapping of the per-cpu GDT */
void load_fixmap_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_fixmap_gdt);

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	/* Load the original GDT */
	load_direct_gdt(cpu);
	/* Reload the per-cpu base */
	load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

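	/*
	 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
	 * 48-byte processor brand string in EAX..EDX.
	 */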
	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

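	/* Leaf 0x80000006: ECX[31:16] is the L2 cache size in KB, EBX holds L2 TLB info. */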
	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	static bool printed;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);

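	/* CPUID.1 EBX[23:16] reports the maximum number of addressable logical processors in this package. */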
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		pr_info_once("CPU0: Hyper-Threading is disabled\n");
		goto out;
	}

	if (smp_num_siblings <= 1)
		goto out;

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);

out:
	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
		pr_info("CPU: Physical Processor ID: %d\n",
			c->phys_proc_id);
		pr_info("CPU: Processor Core ID: %d\n",
			c->cpu_core_id);
		printed = 1;
	}
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86		= x86_family(tfms);
		c->x86_model	= x86_model(tfms);
		c->x86_stepping	= x86_stepping(tfms);

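		/* EDX bit 19 (CLFSH): EBX[15:8] gives the CLFLUSH line size in 8-byte units. */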
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

static void apply_forced_caps(struct cpuinfo_x86 *c)
{
	int i;

	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}
}

static void init_speculation_control(struct cpuinfo_x86 *c)
{
	/*
	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
	 * and they also have a different bit for STIBP support. Also,
	 * a hypervisor might have set the individual AMD bits even on
	 * Intel CPUs, for finer-grained selection of what's available.
	 */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_IBPB);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
		set_cpu_cap(c, X86_FEATURE_STIBP);

	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
		set_cpu_cap(c, X86_FEATURE_IBPB);

	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		set_cpu_cap(c, X86_FEATURE_STIBP);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
	if (c->cpuid_level >= 0x00000006)
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_7_0_EBX] = ebx;
		c->x86_capability[CPUID_7_ECX] = ecx;
		c->x86_capability[CPUID_7_EDX] = edx;
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* Additional Intel-defined flags: level 0x0000000F */
	if (c->cpuid_level >= 0x0000000F) {

		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_F_0_EDX] = edx;

		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
			/* will be overridden if occupancy monitoring exists */
			c->x86_cache_max_rmid = ebx;

			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_F_1_EDX] = edx;

			if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
			      ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
			       (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
				c->x86_cache_max_rmid = ecx;
				c->x86_cache_occ_scale = ebx;
			}
		} else {
			c->x86_cache_max_rmid = -1;
			c->x86_cache_occ_scale = -1;
		}
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000007) {
		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
		c->x86_power = edx;
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	init_scattered_cpuid_features(c);
	init_speculation_control(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.
	 * This needs to happen each time we re-probe, which may happen
	 * several times during CPU initialization.
	 */
	apply_forced_caps(c);
}

static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

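		/* EAX[7:0] = physical address bits, EAX[15:8] = linear (virtual) address bits. */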
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_CEDARVIEW,	X86_FEATURE_ANY },
	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_CLOVERVIEW,	X86_FEATURE_ANY },
	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_LINCROFT,	X86_FEATURE_ANY },
	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_PENWELL,	X86_FEATURE_ANY },
	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_PINEVIEW,	X86_FEATURE_ANY },
	{ X86_VENDOR_CENTAUR,	5 },
	{ X86_VENDOR_INTEL,	5 },
	{ X86_VENDOR_NSC,	5 },
	{ X86_VENDOR_ANY,	4 },
	{}
};

static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
	{ X86_VENDOR_AMD },
	{}
};

/* Only list CPUs which speculate but are not susceptible to SSB */
static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_CORE_YONAH		},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
	{ X86_VENDOR_AMD,	0x12,					},
	{ X86_VENDOR_AMD,	0x11,					},
	{ X86_VENDOR_AMD,	0x10,					},
	{ X86_VENDOR_AMD,	0xf,					},
	{}
};

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = 0;

	if (x86_match_cpu(cpu_no_speculation))
		return;

	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

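	/*
	 * IA32_ARCH_CAPABILITIES enumerates hardware immunities:
	 * ARCH_CAP_RDCL_NO means not vulnerable to Meltdown (Rogue Data
	 * Cache Load), ARCH_CAP_SSB_NO means not vulnerable to
	 * Speculative Store Bypass.
	 */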
	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
	   !(ia32_cap & ARCH_CAP_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	if (x86_match_cpu(cpu_no_meltdown))
		return;

	/* Rogue Data Cache Load? No! */
	if (ia32_cap & ARCH_CAP_RDCL_NO)
		return;

	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the boot CPU.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	/* cyrix could have cpuid enabled via c_identify() */
	if (have_cpuid_p()) {
		cpu_detect(c);
		get_cpu_vendor(c);
		get_cpu_cap(c);
		get_cpu_address_sizes(c);
		setup_force_cpu_cap(X86_FEATURE_CPUID);

		if (this_cpu->c_early_init)
			this_cpu->c_early_init(c);

		c->cpu_index = 0;
		filter_cpuid_features(c, false);

		if (this_cpu->c_bsp_init)
			this_cpu->c_bsp_init(c);
	} else {
		identify_cpu_without_cpuid(c);
		setup_clear_cpu_cap(X86_FEATURE_CPUID);
	}

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

	cpu_set_bug_bits(c);

	fpu__init_system(c);

#ifdef CONFIG_X86_32
	/*
	 * Regardless of whether PCID is enumerated, the SDM says
	 * that it can't be enabled in 32-bit mode.
	 */
	setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	pr_info("KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				pr_info("  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	clear_cpu_cap(c, X86_FEATURE_NOPL);
#else
	set_cpu_cap(c, X86_FEATURE_NOPL);
#endif
}

static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/*
	 * Empirically, writing zero to a segment selector on AMD does
	 * not clear the base, whereas writing zero to a segment
	 * selector on Intel does clear the base.  Intel's behavior
	 * allows slightly faster context switches in the common case
	 * where GS is unused by the prev and next threads.
	 *
	 * Since neither vendor documents this anywhere that I can see,
	 * detect it directly instead of hardcoding the choice by
	 * vendor.
	 *
	 * I've designated AMD's behavior as the "bug" because it's
	 * counterintuitive and less friendly.
	 */

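	/*
	 * Probe: set a non-zero FS base, write a null selector to FS, then
	 * re-read the base.  If the base survived (is still 1), writing
	 * the null selector did not clear it, i.e. the AMD behavior.
	 */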
	unsigned long old_base, tmp;
	rdmsrl(MSR_FS_BASE, old_base);
	wrmsrl(MSR_FS_BASE, 1);
	loadsegment(fs, 0);
	rdmsrl(MSR_FS_BASE, tmp);
	if (tmp != 0)
		set_cpu_bug(c, X86_BUG_NULL_SEG);
	wrmsrl(MSR_FS_BASE, old_base);
#endif
}

static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	get_cpu_address_sizes(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_SMP
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	get_model_name(c); /* Default name */

	detect_nopl(c);

	detect_null_seg_behavior(c);

	/*
	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
	 * systems that run Linux at CPL > 0 may or may not have the
	 * issue, but, even if they have the issue, there's absolutely
	 * nothing we can do about it because we can't use the real IRET
	 * instruction.
	 *
	 * NB: For the time being, only 32-bit kernels support
	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
	 * whether to apply espfix using paravirt hooks.  If any
	 * non-paravirt system ever shows up that does *not* have the
	 * ESPFIX issue, we can change this.
	 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_PARAVIRT
	do {
		extern void native_iret(void);
		if (pv_cpu_ops.iret == native_iret)
			set_cpu_bug(c, X86_BUG_ESPFIX);
	} while (0);
# else
	set_cpu_bug(c, X86_BUG_ESPFIX);
# endif
#endif
}

static void x86_init_cache_qos(struct cpuinfo_x86 *c)
{
	/*
	 * The heavy lifting of max_rmid and cache_occ_scale are handled
	 * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
	 * in case CQM bits really aren't there in this CPU.
	 */
	if (c != &boot_cpu_data) {
		boot_cpu_data.x86_cache_max_rmid =
			min(boot_cpu_data.x86_cache_max_rmid,
			    c->x86_cache_max_rmid);
	}
}

/*
 * Validate that ACPI/mptables have the same information about the
 * effective APIC id and update the package map.
 */
static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int apicid, cpu = smp_processor_id();

	apicid = apic->cpu_present_to_apicid(cpu);

	if (apicid != c->apicid) {
		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
		       cpu, apicid, c->initial_apicid);
	}
	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
#else
	c->logical_proc_id = 0;
#endif
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = 0;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->cu_id = 0xff;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof(c->x86_capability));

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	apply_forced_caps(c);

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP/UMIP */
	setup_smep(c);
	setup_smap(c);
	setup_umip(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	x86_init_rdrand(c);
	x86_init_cache_qos(c);
	setup_pku(c);

	/*
	 * Clear/Set all flags overridden by options; this needs to be done
	 * before the capabilities of all CPUs are ANDed together below.
	 */
	apply_forced_caps(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

		/* OR, i.e. replicate the bug flags */
		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	struct tss_struct *tss;
	int cpu;

	if (!boot_cpu_has(X86_FEATURE_SEP))
		return;

	cpu = get_cpu();
	tss = &per_cpu(cpu_tss_rw, cpu);

	/*
	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
	 * see the big comment in struct x86_hw_tss's definition.
	 */

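	/*
	 * The three SYSENTER MSRs tell the CPU where to land on SYSENTER:
	 * the kernel code segment (CS), the entry stack pointer (ESP) and
	 * the entry point (EIP).
	 */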
	tss->x86_tss.ss1 = __KERNEL_CS;
	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);

	put_cpu();
}
#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#endif
	cpu_detect_tlb(&boot_cpu_data);
}

void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
	validate_apic_and_package_id(c);
	x86_spec_ctrl_setup_ap();
}

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
	return 1;
}
__setup("noclflush", setup_noclflush);

void print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		pr_cont("%s ", vendor);

	if (c->x86_model_id[0])
		pr_cont("%s", c->x86_model_id);
	else
		pr_cont("%d86", c->x86);

	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);

	if (c->x86_stepping || c->cpuid_level >= 0)
		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
	else
		pr_cont(")\n");
}

/*
 * clearcpuid= was already parsed in fpu__init_parse_early_param().
 * But we need to keep a dummy __setup around otherwise it would
 * show up as an environment variable for init.
 */
static __init int setup_clearcpuid(char *arg)
{
	return 1;
}
__setup("clearcpuid=", setup_clearcpuid);

#ifdef CONFIG_X86_64
DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE) __visible;
EXPORT_PER_CPU_SYMBOL_GPL(irq_stack_union);

/*
 * The following percpu variables are hot.  Align current_task to
 * cacheline size such that they fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE;

DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;

DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	extern char _entry_trampoline[];
	extern char entry_SYSCALL_64_trampoline[];

	int cpu = smp_processor_id();
	unsigned long SYSCALL64_entry_trampoline =
		(unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
		(entry_SYSCALL_64_trampoline - _entry_trampoline);

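	/*
	 * MSR_STAR layout: bits 47:32 hold the kernel CS/SS selector base
	 * used by SYSCALL, bits 63:48 the selector base from which SYSRET
	 * derives the user CS/SS.
	 */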
	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
	if (static_cpu_has(X86_FEATURE_PTI))
		wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
	else
		wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);

#ifdef CONFIG_IA32_EMULATION
	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
	/*
	 * This only works on Intel CPUs.
	 * On AMD CPUs these MSRs are 32-bit, the CPU truncates MSR_IA32_SYSENTER_EIP.
	 * This does not cause SYSENTER to jump to the wrong location, because
	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
	 */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else
	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
DEFINE_PER_CPU(int, debug_stack_usage);

int is_debug_stack(unsigned long addr)
{
	return __this_cpu_read(debug_stack_usage) ||
		(addr <= __this_cpu_read(debug_stack_addr) &&
		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
}
NOKPROBE_SYMBOL(is_debug_stack);

DEFINE_PER_CPU(u32, debug_idt_ctr);

void debug_stack_set_zero(void)
{
	this_cpu_inc(debug_idt_ctr);
	load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_set_zero);

void debug_stack_reset(void)
{
	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
		return;
	if (this_cpu_dec_return(debug_idt_ctr) == 0)
		load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_reset);

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
 * the top of the kernel stack.  Use an extra percpu variable to track the
 * top of the kernel stack directly.
 */
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
	(unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

#endif	/* CONFIG_X86_64 */

/*
 * Clear all six writable debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
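		/* DR4 and DR5 are reserved: they alias DR6/DR7 when CR4.DE is clear and fault otherwise. */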
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */

static void wait_for_master_cpu(int cpu)
{
#ifdef CONFIG_SMP
	/*
	 * wait for ACK from master CPU before continuing
	 * with AP initialization
	 */
	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
		cpu_relax();
#endif
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64 bit.
 */
#ifdef CONFIG_X86_64

void cpu_init(void)
{
	struct orig_ist *oist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu = raw_smp_processor_id();
	int i;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	if (cpu)
		load_ucode_ap();

	t = &per_cpu(cpu_tss_rw, cpu);
	oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif

	me = current;

	pr_debug("Initializing CPU#%d\n", cpu);

	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_current_idt();

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
	x2apic_setup();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!oist->ist[0]) {
		char *estacks = get_cpu_entry_area(cpu)->exception_stacks;

		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += exception_stack_sizes[v];
			oist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
			if (v == DEBUG_STACK-1)
				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	mmgrab(&init_mm);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, me);

	/*
	 * Initialize the TSS.  sp0 points to the entry trampoline stack
	 * regardless of what task is running.
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
	load_TR_desc();
	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));

	load_mm_ldt(&init_mm);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();

	if (is_uv_system())
		uv_cpu_init();

	load_fixmap_gdt(cpu);
}

#else

void cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	show_ucode_info_early();

	pr_info("Initializing CPU#%d\n", cpu);

	if (cpu_feature_enabled(X86_FEATURE_VME) ||
	    boot_cpu_has(X86_FEATURE_TSC) ||
	    boot_cpu_has(X86_FEATURE_DE))
		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_current_idt();
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	mmgrab(&init_mm);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, curr);

	/*
	 * Initialize the TSS.  Don't bother initializing sp0, as the initial
	 * task never enters user mode.
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
	load_TR_desc();

	load_mm_ldt(&init_mm);

	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();

	load_fixmap_gdt(cpu);
}
#endif

static void bsp_resume(void)
{
	if (this_cpu->c_bsp_resume)
		this_cpu->c_bsp_resume(&boot_cpu_data);
}

static struct syscore_ops cpu_syscore_ops = {
	.resume		= bsp_resume,
};

static int __init init_cpu_syscore(void)
{
	register_syscore_ops(&cpu_syscore_ops);
	return 0;
}
core_initcall(init_cpu_syscore);

/*
 * The microcode loader calls this upon late microcode load to recheck features,
 * only when microcode has been updated. Caller holds microcode_mutex and CPU
 * hotplug lock.
 */
void microcode_check(void)
{
	struct cpuinfo_x86 info;

	perf_check_microcode();

	/* Reload CPUID max function as it might've changed. */
	info.cpuid_level = cpuid_eax(0);

	/*
	 * Copy all capability leafs to pick up the synthetic ones so that
	 * memcmp() below doesn't fail on that. The ones coming from CPUID will
	 * get overwritten in get_cpu_cap().
	 */
	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));

	get_cpu_cap(&info);

	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
		return;

	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
}
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* cpu_feature_enabled() cannot be used this early */
   3#define USE_EARLY_PGTABLE_L5
   4
   5#include <linux/memblock.h>
   6#include <linux/linkage.h>
   7#include <linux/bitops.h>
   8#include <linux/kernel.h>
   9#include <linux/export.h>
  10#include <linux/percpu.h>
  11#include <linux/string.h>
  12#include <linux/ctype.h>
  13#include <linux/delay.h>
  14#include <linux/sched/mm.h>
  15#include <linux/sched/clock.h>
  16#include <linux/sched/task.h>
  17#include <linux/sched/smt.h>
  18#include <linux/init.h>
  19#include <linux/kprobes.h>
  20#include <linux/kgdb.h>
  21#include <linux/smp.h>
  22#include <linux/io.h>
  23#include <linux/syscore_ops.h>
  24#include <linux/pgtable.h>
  25#include <linux/stackprotector.h>
  26
  27#include <asm/cmdline.h>
  28#include <asm/perf_event.h>
  29#include <asm/mmu_context.h>
  30#include <asm/doublefault.h>
  31#include <asm/archrandom.h>
  32#include <asm/hypervisor.h>
  33#include <asm/processor.h>
  34#include <asm/tlbflush.h>
  35#include <asm/debugreg.h>
  36#include <asm/sections.h>
  37#include <asm/vsyscall.h>
  38#include <linux/topology.h>
  39#include <linux/cpumask.h>
 
  40#include <linux/atomic.h>
  41#include <asm/proto.h>
  42#include <asm/setup.h>
  43#include <asm/apic.h>
  44#include <asm/desc.h>
  45#include <asm/fpu/api.h>
  46#include <asm/mtrr.h>
  47#include <asm/hwcap2.h>
  48#include <linux/numa.h>
  49#include <asm/numa.h>
  50#include <asm/asm.h>
  51#include <asm/bugs.h>
  52#include <asm/cpu.h>
  53#include <asm/mce.h>
  54#include <asm/msr.h>
  55#include <asm/cacheinfo.h>
  56#include <asm/memtype.h>
  57#include <asm/microcode.h>
  58#include <asm/microcode_intel.h>
  59#include <asm/intel-family.h>
  60#include <asm/cpu_device_id.h>
  61#include <asm/uv/uv.h>
  62#include <asm/sigframe.h>
  63#include <asm/traps.h>
  64#include <asm/sev.h>
  65
  66#include "cpu.h"
  67
  68u32 elf_hwcap2 __read_mostly;
  69
  70/* all of these masks are initialized in setup_cpu_local_masks() */
  71cpumask_var_t cpu_initialized_mask;
  72cpumask_var_t cpu_callout_mask;
  73cpumask_var_t cpu_callin_mask;
  74
  75/* representing cpus for which sibling maps can be computed */
  76cpumask_var_t cpu_sibling_setup_mask;
  77
  78/* Number of siblings per CPU package */
  79int smp_num_siblings = 1;
  80EXPORT_SYMBOL(smp_num_siblings);
  81
  82/* Last level cache ID of each logical CPU */
  83DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
  84
  85u16 get_llc_id(unsigned int cpu)
  86{
  87	return per_cpu(cpu_llc_id, cpu);
  88}
  89EXPORT_SYMBOL_GPL(get_llc_id);
  90
  91/* L2 cache ID of each logical CPU */
  92DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id) = BAD_APICID;
  93
  94static struct ppin_info {
  95	int	feature;
  96	int	msr_ppin_ctl;
  97	int	msr_ppin;
  98} ppin_info[] = {
  99	[X86_VENDOR_INTEL] = {
 100		.feature = X86_FEATURE_INTEL_PPIN,
 101		.msr_ppin_ctl = MSR_PPIN_CTL,
 102		.msr_ppin = MSR_PPIN
 103	},
 104	[X86_VENDOR_AMD] = {
 105		.feature = X86_FEATURE_AMD_PPIN,
 106		.msr_ppin_ctl = MSR_AMD_PPIN_CTL,
 107		.msr_ppin = MSR_AMD_PPIN
 108	},
 109};
 110
 111static const struct x86_cpu_id ppin_cpuids[] = {
 112	X86_MATCH_FEATURE(X86_FEATURE_AMD_PPIN, &ppin_info[X86_VENDOR_AMD]),
 113	X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]),
 114
 115	/* Legacy models without CPUID enumeration */
 116	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
 117	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
 118	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
 119	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
 120	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
 121	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
 122	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
 123	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
 124	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
 125	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),
 126
 127	{}
 128};
 129
 130static void ppin_init(struct cpuinfo_x86 *c)
 131{
 132	const struct x86_cpu_id *id;
 133	unsigned long long val;
 134	struct ppin_info *info;
 135
 136	id = x86_match_cpu(ppin_cpuids);
 137	if (!id)
 138		return;
 139
 140	/*
 141	 * Testing the presence of the MSR is not enough. Need to check
 142	 * that the PPIN_CTL allows reading of the PPIN.
 143	 */
 144	info = (struct ppin_info *)id->driver_data;
 145
 146	if (rdmsrl_safe(info->msr_ppin_ctl, &val))
 147		goto clear_ppin;
 148
 149	if ((val & 3UL) == 1UL) {
 150		/* PPIN locked in disabled mode */
 151		goto clear_ppin;
 152	}
 153
 154	/* If PPIN is disabled, try to enable */
 155	if (!(val & 2UL)) {
 156		wrmsrl_safe(info->msr_ppin_ctl,  val | 2UL);
 157		rdmsrl_safe(info->msr_ppin_ctl, &val);
 158	}
 159
 160	/* Is the enable bit set? */
 161	if (val & 2UL) {
 162		c->ppin = __rdmsr(info->msr_ppin);
 163		set_cpu_cap(c, info->feature);
 164		return;
 165	}
 166
 167clear_ppin:
 168	clear_cpu_cap(c, info->feature);
 169}
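/*
 * Editor's note (not part of the kernel source): the PPIN_CTL bit tests
 * above assume the documented MSR layout, bit 0 = LockOut and bit 1 =
 * Enable. Thus (val & 3UL) == 1UL means the control MSR is locked with
 * PPIN reads disabled (unrecoverable until reset), while writing
 * val | 2UL is an attempt to set the Enable bit so the PPIN MSR can be
 * read.
 */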
 170
 171/* correctly size the local cpu masks */
 172void __init setup_cpu_local_masks(void)
 173{
 174	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
 175	alloc_bootmem_cpumask_var(&cpu_callin_mask);
 176	alloc_bootmem_cpumask_var(&cpu_callout_mask);
 177	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 178}
 179
 180static void default_init(struct cpuinfo_x86 *c)
 181{
 182#ifdef CONFIG_X86_64
 183	cpu_detect_cache_sizes(c);
 184#else
 185	/* Not much we can do here... */
 186	/* Check if at least it has cpuid */
 187	if (c->cpuid_level == -1) {
 188		/* No cpuid. It must be an ancient CPU */
 189		if (c->x86 == 4)
 190			strcpy(c->x86_model_id, "486");
 191		else if (c->x86 == 3)
 192			strcpy(c->x86_model_id, "386");
 193	}
 194#endif
 195}
 196
 197static const struct cpu_dev default_cpu = {
 198	.c_init		= default_init,
 199	.c_vendor	= "Unknown",
 200	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
 201};
 202
 203static const struct cpu_dev *this_cpu = &default_cpu;
 204
 205DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 206#ifdef CONFIG_X86_64
 207	/*
 208	 * We need valid kernel segments for data and code in long mode too
 209	 * IRET will check the segment types  kkeil 2000/10/28
 210	 * Also sysret mandates a special GDT layout
 211	 *
 212	 * TLS descriptors are currently at a different place compared to i386.
 213	 * Hopefully nobody expects them at a fixed place (Wine?)
 214	 */
 215	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
 216	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
 217	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
 218	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
 219	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
 220	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
 221#else
 222	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
 223	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 224	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
 225	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
 226	/*
 227	 * Segments used for calling PnP BIOS have byte granularity.
  228	 * The code and data segments have fixed 64k limits;
  229	 * the transfer segment sizes are set at run time.
 230	 */
 231	/* 32-bit code */
 232	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
 233	/* 16-bit code */
 234	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
 235	/* 16-bit data */
 236	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
 237	/* 16-bit data */
 238	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
 239	/* 16-bit data */
 240	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
 241	/*
 242	 * The APM segments have byte granularity and their bases
 243	 * are set at run time.  All have 64k limits.
 244	 */
 245	/* 32-bit code */
 246	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
 247	/* 16-bit code */
 248	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
 249	/* data */
 250	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),
 251
 252	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 253	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
 254#endif
 255} };
 256EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 257
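/*
 * Editor's sketch (not kernel source): decoding one of the entries above,
 * assuming the GDT_ENTRY_INIT(flags, base, limit) packing in which the
 * low byte of "flags" is the access byte and bits 15:12 are the
 * G/D/L/AVL flags:
 *
 *	GDT_ENTRY_INIT(0xa09b, 0, 0xfffff)	64-bit kernel code segment
 *	  access 0x9b: present, DPL 0, code, execute/read, accessed
 *	  flags  0xa:  G=1 (4K granularity), L=1 (long mode), D=0
 *	  base 0, limit 0xfffff 4K pages => 4 GiB
 */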
 258#ifdef CONFIG_X86_64
 259static int __init x86_nopcid_setup(char *s)
 260{
 261	/* nopcid doesn't accept parameters */
 262	if (s)
 263		return -EINVAL;
 264
 265	/* do not emit a message if the feature is not present */
 266	if (!boot_cpu_has(X86_FEATURE_PCID))
 267		return 0;
 268
 269	setup_clear_cpu_cap(X86_FEATURE_PCID);
 270	pr_info("nopcid: PCID feature disabled\n");
 271	return 0;
 272}
 273early_param("nopcid", x86_nopcid_setup);
 274#endif
 275
 276static int __init x86_noinvpcid_setup(char *s)
 277{
 278	/* noinvpcid doesn't accept parameters */
 279	if (s)
 280		return -EINVAL;
 281
 282	/* do not emit a message if the feature is not present */
 283	if (!boot_cpu_has(X86_FEATURE_INVPCID))
 284		return 0;
 285
 286	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
 287	pr_info("noinvpcid: INVPCID feature disabled\n");
 288	return 0;
 289}
 290early_param("noinvpcid", x86_noinvpcid_setup);
 291
 292#ifdef CONFIG_X86_32
 293static int cachesize_override = -1;
 294static int disable_x86_serial_nr = 1;
 295
 296static int __init cachesize_setup(char *str)
 297{
 298	get_option(&str, &cachesize_override);
 299	return 1;
 300}
 301__setup("cachesize=", cachesize_setup);
 302
 303/* Standard macro to see if a specific flag is changeable */
 304static inline int flag_is_changeable_p(u32 flag)
 305{
 306	u32 f1, f2;
 307
 308	/*
 309	 * Cyrix and IDT cpus allow disabling of CPUID
 310	 * so the code below may return different results
 311	 * when it is executed before and after enabling
  312	 * the CPUID. Add "volatile" so that gcc cannot
  313	 * optimize away the subsequent calls to this function.
 314	 */
 315	asm volatile ("pushfl		\n\t"
 316		      "pushfl		\n\t"
 317		      "popl %0		\n\t"
 318		      "movl %0, %1	\n\t"
 319		      "xorl %2, %0	\n\t"
 320		      "pushl %0		\n\t"
 321		      "popfl		\n\t"
 322		      "pushfl		\n\t"
 323		      "popl %0		\n\t"
 324		      "popfl		\n\t"
 325
 326		      : "=&r" (f1), "=&r" (f2)
 327		      : "ir" (flag));
 328
 329	return ((f1^f2) & flag) != 0;
 330}
 331
 332/* Probe for the CPUID instruction */
 333int have_cpuid_p(void)
 334{
 335	return flag_is_changeable_p(X86_EFLAGS_ID);
 336}
 337
 338static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 339{
 340	unsigned long lo, hi;
 341
 342	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
 343		return;
 344
 345	/* Disable processor serial number: */
 346
 347	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 348	lo |= 0x200000;
 349	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 350
 351	pr_notice("CPU serial number disabled.\n");
 352	clear_cpu_cap(c, X86_FEATURE_PN);
 353
 354	/* Disabling the serial number may affect the cpuid level */
 355	c->cpuid_level = cpuid_eax(0);
 356}
 357
 358static int __init x86_serial_nr_setup(char *s)
 359{
 360	disable_x86_serial_nr = 0;
 361	return 1;
 362}
 363__setup("serialnumber", x86_serial_nr_setup);
 364#else
 365static inline int flag_is_changeable_p(u32 flag)
 366{
 367	return 1;
 368}
 369static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 370{
 371}
 372#endif
 373
 374static __always_inline void setup_smep(struct cpuinfo_x86 *c)
 375{
 376	if (cpu_has(c, X86_FEATURE_SMEP))
 377		cr4_set_bits(X86_CR4_SMEP);
 378}
 379
 380static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 381{
 382	unsigned long eflags = native_save_fl();
 383
 384	/* This should have been cleared long ago */
 385	BUG_ON(eflags & X86_EFLAGS_AC);
 386
 387	if (cpu_has(c, X86_FEATURE_SMAP))
 388		cr4_set_bits(X86_CR4_SMAP);
 389}
 390
 391static __always_inline void setup_umip(struct cpuinfo_x86 *c)
 392{
 393	/* Check the boot processor, plus build option for UMIP. */
 394	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
 395		goto out;
 396
 397	/* Check the current processor's cpuid bits. */
 398	if (!cpu_has(c, X86_FEATURE_UMIP))
 399		goto out;
 400
 401	cr4_set_bits(X86_CR4_UMIP);
 402
 403	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");
 404
 405	return;
 406
 407out:
 408	/*
 409	 * Make sure UMIP is disabled in case it was enabled in a
 410	 * previous boot (e.g., via kexec).
 411	 */
 412	cr4_clear_bits(X86_CR4_UMIP);
 413}
 414
 415/* These bits should not change their value after CPU init is finished. */
 416static const unsigned long cr4_pinned_mask =
 417	X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
 418	X86_CR4_FSGSBASE | X86_CR4_CET;
 419static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
 420static unsigned long cr4_pinned_bits __ro_after_init;
 421
 422void native_write_cr0(unsigned long val)
 423{
 424	unsigned long bits_missing = 0;
 425
 426set_register:
 427	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
 428
 429	if (static_branch_likely(&cr_pinning)) {
 430		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
 431			bits_missing = X86_CR0_WP;
 432			val |= bits_missing;
 433			goto set_register;
 434		}
 435		/* Warn after we've set the missing bits. */
 436		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
 437	}
 438}
 439EXPORT_SYMBOL(native_write_cr0);
 440
 441void __no_profile native_write_cr4(unsigned long val)
 442{
 443	unsigned long bits_changed = 0;
 444
 445set_register:
 446	asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
 447
 448	if (static_branch_likely(&cr_pinning)) {
 449		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
 450			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
 451			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
 452			goto set_register;
 453		}
 454		/* Warn after we've corrected the changed bits. */
 455		WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
 456			  bits_changed);
 457	}
 458}
 459#if IS_MODULE(CONFIG_LKDTM)
 460EXPORT_SYMBOL_GPL(native_write_cr4);
 461#endif
 462
 463void cr4_update_irqsoff(unsigned long set, unsigned long clear)
 464{
 465	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
 466
 467	lockdep_assert_irqs_disabled();
 468
 469	newval = (cr4 & ~clear) | set;
 470	if (newval != cr4) {
 471		this_cpu_write(cpu_tlbstate.cr4, newval);
 472		__write_cr4(newval);
 473	}
 474}
 475EXPORT_SYMBOL(cr4_update_irqsoff);
 476
 477/* Read the CR4 shadow. */
 478unsigned long cr4_read_shadow(void)
 479{
 480	return this_cpu_read(cpu_tlbstate.cr4);
 481}
 482EXPORT_SYMBOL_GPL(cr4_read_shadow);
 483
 484void cr4_init(void)
 485{
 486	unsigned long cr4 = __read_cr4();
 487
 488	if (boot_cpu_has(X86_FEATURE_PCID))
 489		cr4 |= X86_CR4_PCIDE;
 490	if (static_branch_likely(&cr_pinning))
 491		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
 492
 493	__write_cr4(cr4);
 494
 495	/* Initialize cr4 shadow for this CPU. */
 496	this_cpu_write(cpu_tlbstate.cr4, cr4);
 497}
 498
 499/*
 500 * Once CPU feature detection is finished (and boot params have been
 501 * parsed), record any of the sensitive CR bits that are set, and
 502 * enable CR pinning.
 503 */
 504static void __init setup_cr_pinning(void)
 505{
 506	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
 507	static_key_enable(&cr_pinning.key);
 508}
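/*
 * Editor's illustration (hypothetical, not kernel source): once pinning
 * is enabled, an attempted write such as
 *
 *	native_write_cr4(__read_cr4() & ~X86_CR4_SMEP);
 *
 * is undone by the set_register loop in native_write_cr4() above: the
 * pinned SMEP bit is OR'ed back in, CR4 is rewritten, and a one-time
 * warning is emitted.
 */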
 509
 510static __init int x86_nofsgsbase_setup(char *arg)
 511{
 512	/* Require an exact match without trailing characters. */
 513	if (strlen(arg))
 514		return 0;
 515
 516	/* Do not emit a message if the feature is not present. */
 517	if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
 518		return 1;
 519
 520	setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
 521	pr_info("FSGSBASE disabled via kernel command line\n");
 522	return 1;
 523}
 524__setup("nofsgsbase", x86_nofsgsbase_setup);
 525
 526/*
 527 * Protection Keys are not available in 32-bit mode.
 528 */
 529static bool pku_disabled;
 530
 531static __always_inline void setup_pku(struct cpuinfo_x86 *c)
 532{
 533	if (c == &boot_cpu_data) {
 534		if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
 535			return;
 536		/*
 537		 * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
 538		 * bit to be set.  Enforce it.
 539		 */
 540		setup_force_cpu_cap(X86_FEATURE_OSPKE);
 541
 542	} else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
 543		return;
 544	}
 545
 546	cr4_set_bits(X86_CR4_PKE);
 547	/* Load the default PKRU value */
 548	pkru_write_default();
 549}
 550
 551#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 552static __init int setup_disable_pku(char *arg)
 553{
 554	/*
 555	 * Do not clear the X86_FEATURE_PKU bit.  All of the
 556	 * runtime checks are against OSPKE so clearing the
 557	 * bit does nothing.
 558	 *
 559	 * This way, we will see "pku" in cpuinfo, but not
 560	 * "ospke", which is exactly what we want.  It shows
 561	 * that the CPU has PKU, but the OS has not enabled it.
 562	 * This happens to be exactly how a system would look
 563	 * if we disabled the config option.
 564	 */
 565	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
 566	pku_disabled = true;
 567	return 1;
 568}
 569__setup("nopku", setup_disable_pku);
  570#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 571
 572#ifdef CONFIG_X86_KERNEL_IBT
 573
 574__noendbr u64 ibt_save(void)
 575{
 576	u64 msr = 0;
 577
 578	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
 579		rdmsrl(MSR_IA32_S_CET, msr);
 580		wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
 581	}
 582
 583	return msr;
 584}
 585
 586__noendbr void ibt_restore(u64 save)
 587{
 588	u64 msr;
 589
 590	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
 591		rdmsrl(MSR_IA32_S_CET, msr);
 592		msr &= ~CET_ENDBR_EN;
 593		msr |= (save & CET_ENDBR_EN);
 594		wrmsrl(MSR_IA32_S_CET, msr);
 595	}
 596}
 597
 598#endif
 599
 600static __always_inline void setup_cet(struct cpuinfo_x86 *c)
 601{
 602	u64 msr = CET_ENDBR_EN;
 603
 604	if (!HAS_KERNEL_IBT ||
 605	    !cpu_feature_enabled(X86_FEATURE_IBT))
 606		return;
 607
 608	wrmsrl(MSR_IA32_S_CET, msr);
 609	cr4_set_bits(X86_CR4_CET);
 610
 611	if (!ibt_selftest()) {
 612		pr_err("IBT selftest: Failed!\n");
 613		wrmsrl(MSR_IA32_S_CET, 0);
 614		setup_clear_cpu_cap(X86_FEATURE_IBT);
 615		return;
 616	}
 617}
 618
 619__noendbr void cet_disable(void)
 620{
 621	if (cpu_feature_enabled(X86_FEATURE_IBT))
 622		wrmsrl(MSR_IA32_S_CET, 0);
 623}
 624
 625/*
 626 * Some CPU features depend on higher CPUID levels, which may not always
 627 * be available due to CPUID level capping or broken virtualization
 628 * software.  Add those features to this table to auto-disable them.
 629 */
 630struct cpuid_dependent_feature {
 631	u32 feature;
 632	u32 level;
 633};
 634
 635static const struct cpuid_dependent_feature
 636cpuid_dependent_features[] = {
 637	{ X86_FEATURE_MWAIT,		0x00000005 },
 638	{ X86_FEATURE_DCA,		0x00000009 },
 639	{ X86_FEATURE_XSAVE,		0x0000000d },
 640	{ 0, 0 }
 641};
 642
 643static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 644{
 645	const struct cpuid_dependent_feature *df;
 646
 647	for (df = cpuid_dependent_features; df->feature; df++) {
 648
 649		if (!cpu_has(c, df->feature))
 650			continue;
 651		/*
 652		 * Note: cpuid_level is set to -1 if unavailable, but
  653	 * extended_cpuid_level is set to 0 if unavailable
 654		 * and the legitimate extended levels are all negative
 655		 * when signed; hence the weird messing around with
 656		 * signs here...
 657		 */
 658		if (!((s32)df->level < 0 ?
 659		     (u32)df->level > (u32)c->extended_cpuid_level :
 660		     (s32)df->level > (s32)c->cpuid_level))
 661			continue;
 662
 663		clear_cpu_cap(c, df->feature);
 664		if (!warn)
 665			continue;
 666
 667		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
 668			x86_cap_flag(df->feature), df->level);
 669	}
 670}
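/*
 * Editor's example of the sign trick above: X86_FEATURE_XSAVE requires
 * level 0x0000000d, which is positive, so it is compared signed against
 * cpuid_level; a cpuid_level of -1 (no CPUID) always fails the check and
 * the feature is cleared. A hypothetical extended level such as
 * 0x80000008 would be negative when signed and is therefore compared
 * unsigned against extended_cpuid_level instead.
 */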
 671
 672/*
 673 * Naming convention should be: <Name> [(<Codename>)]
  674 * This table is only used if init_<vendor>() below doesn't set the
  675 * model name; in particular, it isn't used if CPUID levels
  676 * 0x80000002..4 are supported.
 677 */
 678
 679/* Look up CPU names by table lookup. */
 680static const char *table_lookup_model(struct cpuinfo_x86 *c)
 681{
 682#ifdef CONFIG_X86_32
 683	const struct legacy_cpu_model_info *info;
 684
 685	if (c->x86_model >= 16)
 686		return NULL;	/* Range check */
 687
 688	if (!this_cpu)
 689		return NULL;
 690
 691	info = this_cpu->legacy_models;
 692
 693	while (info->family) {
 694		if (info->family == c->x86)
 695			return info->model_names[c->x86_model];
 696		info++;
 697	}
 698#endif
 699	return NULL;		/* Not found */
 700}
 701
 702/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
 703__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
 704__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
 705
 706#ifdef CONFIG_X86_32
 707/* The 32-bit entry code needs to find cpu_entry_area. */
 708DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 709#endif
 710
 711/* Load the original GDT from the per-cpu structure */
 712void load_direct_gdt(int cpu)
 713{
 714	struct desc_ptr gdt_descr;
 715
 716	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
 717	gdt_descr.size = GDT_SIZE - 1;
 718	load_gdt(&gdt_descr);
 719}
 720EXPORT_SYMBOL_GPL(load_direct_gdt);
 721
 722/* Load a fixmap remapping of the per-cpu GDT */
 723void load_fixmap_gdt(int cpu)
 724{
 725	struct desc_ptr gdt_descr;
 726
 727	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
 728	gdt_descr.size = GDT_SIZE - 1;
 729	load_gdt(&gdt_descr);
 730}
 731EXPORT_SYMBOL_GPL(load_fixmap_gdt);
 732
 733/**
 734 * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base
 735 * @cpu:	The CPU number for which this is invoked
 736 *
 737 * Invoked during early boot to switch from early GDT and early per CPU to
 738 * the direct GDT and the runtime per CPU area. On 32-bit the percpu base
 739 * switch is implicit by loading the direct GDT. On 64bit this requires
 740 * to update GSBASE.
 741 */
 742void __init switch_gdt_and_percpu_base(int cpu)
 743{
 744	load_direct_gdt(cpu);
 745
 746#ifdef CONFIG_X86_64
 747	/*
 748	 * No need to load %gs. It is already correct.
 749	 *
  750	 * Writing %gs on 64bit would zero GSBASE, which would make any per
  751	 * CPU operation before the wrmsrl() below fault.
 752	 *
 753	 * Set GSBASE to the new offset. Until the wrmsrl() happens the
 754	 * early mapping is still valid. That means the GSBASE update will
 755	 * lose any prior per CPU data which was not copied over in
 756	 * setup_per_cpu_areas().
 757	 *
 758	 * This works even with stackprotector enabled because the
 759	 * per CPU stack canary is 0 in both per CPU areas.
 760	 */
 761	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
 762#else
 763	/*
 764	 * %fs is already set to __KERNEL_PERCPU, but after switching GDT
 765	 * it is required to load FS again so that the 'hidden' part is
 766	 * updated from the new GDT. Up to this point the early per CPU
 767	 * translation is active. Any content of the early per CPU data
 768	 * which was not copied over in setup_per_cpu_areas() is lost.
 769	 */
 770	loadsegment(fs, __KERNEL_PERCPU);
 771#endif
 772}
 773
 774static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 775
 776static void get_model_name(struct cpuinfo_x86 *c)
 777{
 778	unsigned int *v;
 779	char *p, *q, *s;
 780
 781	if (c->extended_cpuid_level < 0x80000004)
 782		return;
 783
 784	v = (unsigned int *)c->x86_model_id;
 785	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 786	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 787	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 788	c->x86_model_id[48] = 0;
 789
 790	/* Trim whitespace */
 791	p = q = s = &c->x86_model_id[0];
 792
 793	while (*p == ' ')
 794		p++;
 795
 796	while (*p) {
 797		/* Note the last non-whitespace index */
 798		if (!isspace(*p))
 799			s = q;
 800
 801		*q++ = *p++;
 802	}
 803
 804	*(s + 1) = '\0';
 805}
 806
 807void detect_num_cpu_cores(struct cpuinfo_x86 *c)
 808{
 809	unsigned int eax, ebx, ecx, edx;
 810
 811	c->x86_max_cores = 1;
 812	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
 813		return;
 814
 815	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 816	if (eax & 0x1f)
 817		c->x86_max_cores = (eax >> 26) + 1;
 818}
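/*
 * Editor's example with a hypothetical CPUID.(EAX=4, ECX=0) value of
 * eax = 0x1c004121: (eax & 0x1f) == 1, so a cache is reported, and
 * ((eax >> 26) + 1) == 8, so x86_max_cores becomes 8.
 */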
 819
 820void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 821{
 822	unsigned int n, dummy, ebx, ecx, edx, l2size;
 823
 824	n = c->extended_cpuid_level;
 825
 826	if (n >= 0x80000005) {
 827		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
 828		c->x86_cache_size = (ecx>>24) + (edx>>24);
 829#ifdef CONFIG_X86_64
 830		/* On K8 L1 TLB is inclusive, so don't count it */
 831		c->x86_tlbsize = 0;
 832#endif
 833	}
 834
  835	if (n < 0x80000006)	/* Some chips just have a large L1. */
 836		return;
 837
 838	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
 839	l2size = ecx >> 16;
 840
 841#ifdef CONFIG_X86_64
 842	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
 843#else
 844	/* do processor-specific cache resizing */
 845	if (this_cpu->legacy_cache_size)
 846		l2size = this_cpu->legacy_cache_size(c, l2size);
 847
 848	/* Allow user to override all this if necessary. */
 849	if (cachesize_override != -1)
 850		l2size = cachesize_override;
 851
 852	if (l2size == 0)
 853		return;		/* Again, no L2 cache is possible */
 854#endif
 855
 856	c->x86_cache_size = l2size;
 857}
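/*
 * Editor's example: a hypothetical CPUID 0x80000006 ECX value of
 * 0x02008140 gives l2size = 0x02008140 >> 16 = 0x0200 = 512 KB, which
 * (absent the legacy overrides above) becomes x86_cache_size.
 */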
 858
 859u16 __read_mostly tlb_lli_4k[NR_INFO];
 860u16 __read_mostly tlb_lli_2m[NR_INFO];
 861u16 __read_mostly tlb_lli_4m[NR_INFO];
 862u16 __read_mostly tlb_lld_4k[NR_INFO];
 863u16 __read_mostly tlb_lld_2m[NR_INFO];
 864u16 __read_mostly tlb_lld_4m[NR_INFO];
 865u16 __read_mostly tlb_lld_1g[NR_INFO];
 866
 867static void cpu_detect_tlb(struct cpuinfo_x86 *c)
 868{
 869	if (this_cpu->c_detect_tlb)
 870		this_cpu->c_detect_tlb(c);
 871
 872	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
 873		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
 874		tlb_lli_4m[ENTRIES]);
 875
 876	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
 877		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
 878		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 879}
 880
 881int detect_ht_early(struct cpuinfo_x86 *c)
 882{
 883#ifdef CONFIG_SMP
 884	u32 eax, ebx, ecx, edx;
 885
 886	if (!cpu_has(c, X86_FEATURE_HT))
 887		return -1;
 888
 889	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 890		return -1;
 891
 892	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
 893		return -1;
 894
 895	cpuid(1, &eax, &ebx, &ecx, &edx);
 896
 897	smp_num_siblings = (ebx & 0xff0000) >> 16;
 898	if (smp_num_siblings == 1)
 899		pr_info_once("CPU0: Hyper-Threading is disabled\n");
 900#endif
 901	return 0;
 902}
 903
 904void detect_ht(struct cpuinfo_x86 *c)
 905{
 906#ifdef CONFIG_SMP
 907	int index_msb, core_bits;
 908
 909	if (detect_ht_early(c) < 0)
 910		return;
 911
 912	index_msb = get_count_order(smp_num_siblings);
 913	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 914
 915	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 916
 917	index_msb = get_count_order(smp_num_siblings);
 918
 919	core_bits = get_count_order(c->x86_max_cores);
 920
 921	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 922				       ((1 << core_bits) - 1);
 923#endif
 924}
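/*
 * Editor's worked example of the topology math above, assuming the
 * common phys_pkg_id() implementation that simply shifts the APIC id:
 * with smp_num_siblings = 8 (CPUID.1 EBX[23:16]) and x86_max_cores = 4,
 * index_msb = 3 so phys_proc_id = apicid >> 3; siblings become 8/4 = 2,
 * index_msb = 1, core_bits = 2, so cpu_core_id = (apicid >> 1) & 3.
 */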
 925
 926static void get_cpu_vendor(struct cpuinfo_x86 *c)
 927{
 928	char *v = c->x86_vendor_id;
 929	int i;
 930
 931	for (i = 0; i < X86_VENDOR_NUM; i++) {
 932		if (!cpu_devs[i])
 933			break;
 934
 935		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 936		    (cpu_devs[i]->c_ident[1] &&
 937		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 938
 939			this_cpu = cpu_devs[i];
 940			c->x86_vendor = this_cpu->c_x86_vendor;
 941			return;
 942		}
 943	}
 944
 945	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
 946		    "CPU: Your system may be unstable.\n", v);
 947
 948	c->x86_vendor = X86_VENDOR_UNKNOWN;
 949	this_cpu = &default_cpu;
 950}
 951
 952void cpu_detect(struct cpuinfo_x86 *c)
 953{
 954	/* Get vendor name */
 955	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
 956	      (unsigned int *)&c->x86_vendor_id[0],
 957	      (unsigned int *)&c->x86_vendor_id[8],
 958	      (unsigned int *)&c->x86_vendor_id[4]);
 959
 960	c->x86 = 4;
 961	/* Intel-defined flags: level 0x00000001 */
 962	if (c->cpuid_level >= 0x00000001) {
 963		u32 junk, tfms, cap0, misc;
 964
 965		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 966		c->x86		= x86_family(tfms);
 967		c->x86_model	= x86_model(tfms);
 968		c->x86_stepping	= x86_stepping(tfms);
 969
 970		if (cap0 & (1<<19)) {
 971			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 972			c->x86_cache_alignment = c->x86_clflush_size;
 973		}
 974	}
 975}
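/*
 * Editor's example: a hypothetical CPUID.1 EAX (tfms) of 0x000906ea
 * decodes to family 6, model 0x9e (extended model 9, model 0xe),
 * stepping 0xa. If cap0 bit 19 (CLFLUSH) is set and misc[15:8] = 8,
 * then x86_clflush_size = 8 * 8 = 64 bytes.
 */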
 976
 977static void apply_forced_caps(struct cpuinfo_x86 *c)
 978{
 979	int i;
 980
 981	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
 982		c->x86_capability[i] &= ~cpu_caps_cleared[i];
 983		c->x86_capability[i] |= cpu_caps_set[i];
 984	}
 985}
 986
 987static void init_speculation_control(struct cpuinfo_x86 *c)
 988{
 989	/*
 990	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
 991	 * and they also have a different bit for STIBP support. Also,
 992	 * a hypervisor might have set the individual AMD bits even on
 993	 * Intel CPUs, for finer-grained selection of what's available.
 994	 */
 995	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
 996		set_cpu_cap(c, X86_FEATURE_IBRS);
 997		set_cpu_cap(c, X86_FEATURE_IBPB);
 998		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 999	}
1000
1001	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
1002		set_cpu_cap(c, X86_FEATURE_STIBP);
1003
1004	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
1005	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
1006		set_cpu_cap(c, X86_FEATURE_SSBD);
1007
1008	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
1009		set_cpu_cap(c, X86_FEATURE_IBRS);
1010		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1011	}
1012
1013	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
1014		set_cpu_cap(c, X86_FEATURE_IBPB);
1015
1016	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
1017		set_cpu_cap(c, X86_FEATURE_STIBP);
1018		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1019	}
1020
1021	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
1022		set_cpu_cap(c, X86_FEATURE_SSBD);
1023		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1024		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
1025	}
1026}
1027
1028void get_cpu_cap(struct cpuinfo_x86 *c)
1029{
1030	u32 eax, ebx, ecx, edx;
1031
1032	/* Intel-defined flags: level 0x00000001 */
1033	if (c->cpuid_level >= 0x00000001) {
1034		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
1035
1036		c->x86_capability[CPUID_1_ECX] = ecx;
1037		c->x86_capability[CPUID_1_EDX] = edx;
1038	}
1039
1040	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
1041	if (c->cpuid_level >= 0x00000006)
1042		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
1043
1044	/* Additional Intel-defined flags: level 0x00000007 */
1045	if (c->cpuid_level >= 0x00000007) {
1046		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
1047		c->x86_capability[CPUID_7_0_EBX] = ebx;
1048		c->x86_capability[CPUID_7_ECX] = ecx;
1049		c->x86_capability[CPUID_7_EDX] = edx;
1050
1051		/* Check valid sub-leaf index before accessing it */
1052		if (eax >= 1) {
1053			cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
1054			c->x86_capability[CPUID_7_1_EAX] = eax;
1055		}
1056	}
1057
1058	/* Extended state features: level 0x0000000d */
1059	if (c->cpuid_level >= 0x0000000d) {
1060		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
1061
1062		c->x86_capability[CPUID_D_1_EAX] = eax;
1063	}
1064
1065	/* AMD-defined flags: level 0x80000001 */
1066	eax = cpuid_eax(0x80000000);
1067	c->extended_cpuid_level = eax;
1068
1069	if ((eax & 0xffff0000) == 0x80000000) {
1070		if (eax >= 0x80000001) {
1071			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
1072
1073			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
1074			c->x86_capability[CPUID_8000_0001_EDX] = edx;
1075		}
1076	}
1077
1078	if (c->extended_cpuid_level >= 0x80000007) {
1079		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
1080
1081		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
1082		c->x86_power = edx;
1083	}
1084
1085	if (c->extended_cpuid_level >= 0x80000008) {
1086		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1087		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
1088	}
1089
1090	if (c->extended_cpuid_level >= 0x8000000a)
1091		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
1092
1093	if (c->extended_cpuid_level >= 0x8000001f)
1094		c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
1095
1096	init_scattered_cpuid_features(c);
1097	init_speculation_control(c);
1098
1099	/*
1100	 * Clear/Set all flags overridden by options, after probe.
1101	 * This needs to happen each time we re-probe, which may happen
1102	 * several times during CPU initialization.
1103	 */
1104	apply_forced_caps(c);
1105}
1106
1107void get_cpu_address_sizes(struct cpuinfo_x86 *c)
1108{
1109	u32 eax, ebx, ecx, edx;
1110
1111	if (c->extended_cpuid_level >= 0x80000008) {
1112		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1113
1114		c->x86_virt_bits = (eax >> 8) & 0xff;
1115		c->x86_phys_bits = eax & 0xff;
1116	}
1117#ifdef CONFIG_X86_32
1118	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
1119		c->x86_phys_bits = 36;
1120#endif
1121	c->x86_cache_bits = c->x86_phys_bits;
1122}
1123
1124static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
1125{
1126#ifdef CONFIG_X86_32
1127	int i;
1128
1129	/*
1130	 * First of all, decide if this is a 486 or higher
1131	 * It's a 486 if we can modify the AC flag
1132	 */
1133	if (flag_is_changeable_p(X86_EFLAGS_AC))
1134		c->x86 = 4;
1135	else
1136		c->x86 = 3;
1137
1138	for (i = 0; i < X86_VENDOR_NUM; i++)
1139		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
1140			c->x86_vendor_id[0] = 0;
1141			cpu_devs[i]->c_identify(c);
1142			if (c->x86_vendor_id[0]) {
1143				get_cpu_vendor(c);
1144				break;
1145			}
1146		}
1147#endif
1148}
1149
1150#define NO_SPECULATION		BIT(0)
1151#define NO_MELTDOWN		BIT(1)
1152#define NO_SSB			BIT(2)
1153#define NO_L1TF			BIT(3)
1154#define NO_MDS			BIT(4)
1155#define MSBDS_ONLY		BIT(5)
1156#define NO_SWAPGS		BIT(6)
1157#define NO_ITLB_MULTIHIT	BIT(7)
1158#define NO_SPECTRE_V2		BIT(8)
1159#define NO_MMIO			BIT(9)
1160#define NO_EIBRS_PBRSB		BIT(10)
1161
1162#define VULNWL(vendor, family, model, whitelist)	\
1163	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
1164
1165#define VULNWL_INTEL(model, whitelist)		\
1166	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
1167
1168#define VULNWL_AMD(family, whitelist)		\
1169	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
1170
1171#define VULNWL_HYGON(family, whitelist)		\
1172	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
1173
1174static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
1175	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
1176	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
1177	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
1178	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
1179	VULNWL(VORTEX,	5, X86_MODEL_ANY,	NO_SPECULATION),
1180	VULNWL(VORTEX,	6, X86_MODEL_ANY,	NO_SPECULATION),
1181
1182	/* Intel Family 6 */
1183	VULNWL_INTEL(TIGERLAKE,			NO_MMIO),
1184	VULNWL_INTEL(TIGERLAKE_L,		NO_MMIO),
1185	VULNWL_INTEL(ALDERLAKE,			NO_MMIO),
1186	VULNWL_INTEL(ALDERLAKE_L,		NO_MMIO),
1187
1188	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1189	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
1190	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1191	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1192	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1193
1194	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1195	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1196	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1197	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1198	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1199	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1200
1201	VULNWL_INTEL(CORE_YONAH,		NO_SSB),
1202
1203	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1204	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
1205
1206	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1207	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1208	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
1209
1210	/*
1211	 * Technically, swapgs isn't serializing on AMD (despite it previously
1212	 * being documented as such in the APM).  But according to AMD, %gs is
1213	 * updated non-speculatively, and the issuing of %gs-relative memory
1214	 * operands will be blocked until the %gs update completes, which is
1215	 * good enough for our purposes.
1216	 */
1217
1218	VULNWL_INTEL(ATOM_TREMONT,		NO_EIBRS_PBRSB),
1219	VULNWL_INTEL(ATOM_TREMONT_L,		NO_EIBRS_PBRSB),
1220	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
1221
1222	/* AMD Family 0xf - 0x12 */
1223	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1224	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1225	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1226	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1227
1228	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1229	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1230	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1231
1232	/* Zhaoxin Family 7 */
1233	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
1234	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
1235	{}
1236};
1237
1238#define VULNBL(vendor, family, model, blacklist)	\
1239	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
1240
1241#define VULNBL_INTEL_STEPPINGS(model, steppings, issues)		   \
1242	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,		   \
1243					    INTEL_FAM6_##model, steppings, \
1244					    X86_FEATURE_ANY, issues)
1245
1246#define VULNBL_AMD(family, blacklist)		\
1247	VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
1248
1249#define VULNBL_HYGON(family, blacklist)		\
1250	VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)
1251
1252#define SRBDS		BIT(0)
1253/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
1254#define MMIO		BIT(1)
1255/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
1256#define MMIO_SBDS	BIT(2)
1257/* CPU is affected by RETbleed, speculating where you would not expect it */
1258#define RETBLEED	BIT(3)
1259/* CPU is affected by SMT (cross-thread) return predictions */
1260#define SMT_RSB		BIT(4)
1261
1262static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
1263	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
1264	VULNBL_INTEL_STEPPINGS(HASWELL,		X86_STEPPING_ANY,		SRBDS),
1265	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,		SRBDS),
1266	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,		SRBDS),
1267	VULNBL_INTEL_STEPPINGS(HASWELL_X,	X86_STEPPING_ANY,		MMIO),
1268	VULNBL_INTEL_STEPPINGS(BROADWELL_D,	X86_STEPPING_ANY,		MMIO),
1269	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
1270	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
1271	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
1272	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
1273	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,		MMIO | RETBLEED),
1274	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
1275	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
1276	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
1277	VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
1278	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
1279	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,		MMIO),
1280	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPING_ANY,		MMIO),
1281	VULNBL_INTEL_STEPPINGS(COMETLAKE,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
1282	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED),
1283	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
1284	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
1285	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED),
1286	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
1287	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO),
1288	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
1289
1290	VULNBL_AMD(0x15, RETBLEED),
1291	VULNBL_AMD(0x16, RETBLEED),
1292	VULNBL_AMD(0x17, RETBLEED | SMT_RSB),
1293	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
1294	{}
1295};
1296
1297static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
1298{
1299	const struct x86_cpu_id *m = x86_match_cpu(table);
1300
1301	return m && !!(m->driver_data & which);
1302}
1303
1304u64 x86_read_arch_cap_msr(void)
1305{
1306	u64 ia32_cap = 0;
1307
1308	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
1309		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
1310
1311	return ia32_cap;
1312}
1313
1314static bool arch_cap_mmio_immune(u64 ia32_cap)
1315{
1316	return (ia32_cap & ARCH_CAP_FBSDP_NO &&
1317		ia32_cap & ARCH_CAP_PSDP_NO &&
1318		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
1319}
1320
1321static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1322{
1323	u64 ia32_cap = x86_read_arch_cap_msr();
1324
1325	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
1326	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
1327	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
1328		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
1329
1330	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
1331		return;
1332
1333	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1334
1335	if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
1336		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1337
1338	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
1339	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
 1340	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
1341		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1342
1343	if (ia32_cap & ARCH_CAP_IBRS_ALL)
1344		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1345
1346	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
1347	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
1348		setup_force_cpu_bug(X86_BUG_MDS);
1349		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
1350			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
1351	}
1352
1353	if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
1354		setup_force_cpu_bug(X86_BUG_SWAPGS);
1355
1356	/*
1357	 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
1358	 *	- TSX is supported or
1359	 *	- TSX_CTRL is present
1360	 *
 1361	 * The TSX_CTRL check is needed for cases where TSX could have been
 1362	 * disabled before the kernel boots, e.g. by kexec.
 1363	 * The TSX_CTRL check alone is not sufficient when the microcode
 1364	 * update is not present, or when running as a guest that doesn't get TSX_CTRL.
1365	 */
1366	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
1367	    (cpu_has(c, X86_FEATURE_RTM) ||
1368	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
1369		setup_force_cpu_bug(X86_BUG_TAA);
1370
1371	/*
1372	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
1373	 * in the vulnerability blacklist.
1374	 *
1375	 * Some of the implications and mitigation of Shared Buffers Data
1376	 * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as
1377	 * SRBDS.
1378	 */
1379	if ((cpu_has(c, X86_FEATURE_RDRAND) ||
1380	     cpu_has(c, X86_FEATURE_RDSEED)) &&
1381	    cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
1382		    setup_force_cpu_bug(X86_BUG_SRBDS);
1383
1384	/*
1385	 * Processor MMIO Stale Data bug enumeration
1386	 *
 1387	 * The affected CPU list is generally enough to enumerate the vulnerability,
 1388	 * but in the virtualized case also check the ARCH_CAP MSR bits, as the
 1389	 * VMM may not want the guest to enumerate the bug.
1390	 *
1391	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
1392	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
1393	 */
1394	if (!arch_cap_mmio_immune(ia32_cap)) {
1395		if (cpu_matches(cpu_vuln_blacklist, MMIO))
1396			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
1397		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
1398			setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
1399	}
1400
1401	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
1402		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
1403			setup_force_cpu_bug(X86_BUG_RETBLEED);
1404	}
1405
1406	if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
1407	    !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
1408	    !(ia32_cap & ARCH_CAP_PBRSB_NO))
1409		setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
1410
1411	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
1412		setup_force_cpu_bug(X86_BUG_SMT_RSB);
1413
1414	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
1415		return;
1416
1417	/* Rogue Data Cache Load? No! */
1418	if (ia32_cap & ARCH_CAP_RDCL_NO)
1419		return;
1420
1421	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1422
1423	if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
1424		return;
1425
1426	setup_force_cpu_bug(X86_BUG_L1TF);
1427}
1428
1429/*
1430 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
1431 * unfortunately, that's not true in practice because of early VIA
1432 * chips and (more importantly) broken virtualizers that are not easy
1433 * to detect. In the latter case it doesn't even *fail* reliably, so
1434 * probing for it doesn't even work. Disable it completely on 32-bit
1435 * unless we can find a reliable way to detect all the broken cases.
1436 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1437 */
1438static void detect_nopl(void)
1439{
1440#ifdef CONFIG_X86_32
1441	setup_clear_cpu_cap(X86_FEATURE_NOPL);
1442#else
1443	setup_force_cpu_cap(X86_FEATURE_NOPL);
1444#endif
1445}
1446
1447/*
1448 * We parse cpu parameters early because fpu__init_system() is executed
1449 * before parse_early_param().
1450 */
1451static void __init cpu_parse_early_param(void)
1452{
1453	char arg[128];
1454	char *argptr = arg, *opt;
1455	int arglen, taint = 0;
1456
1457#ifdef CONFIG_X86_32
1458	if (cmdline_find_option_bool(boot_command_line, "no387"))
1459#ifdef CONFIG_MATH_EMULATION
1460		setup_clear_cpu_cap(X86_FEATURE_FPU);
1461#else
 1462		pr_err("Option 'no387' requires CONFIG_MATH_EMULATION to be enabled.\n");
1463#endif
1464
1465	if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
1466		setup_clear_cpu_cap(X86_FEATURE_FXSR);
1467#endif
1468
1469	if (cmdline_find_option_bool(boot_command_line, "noxsave"))
1470		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
1471
1472	if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
1473		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
1474
1475	if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
1476		setup_clear_cpu_cap(X86_FEATURE_XSAVES);
1477
1478	arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
1479	if (arglen <= 0)
1480		return;
1481
1482	pr_info("Clearing CPUID bits:");
1483
1484	while (argptr) {
1485		bool found __maybe_unused = false;
1486		unsigned int bit;
1487
1488		opt = strsep(&argptr, ",");
1489
1490		/*
1491		 * Handle naked numbers first for feature flags which don't
1492		 * have names.
1493		 */
1494		if (!kstrtouint(opt, 10, &bit)) {
1495			if (bit < NCAPINTS * 32) {
1496
1497#ifdef CONFIG_X86_FEATURE_NAMES
1498				/* empty-string, i.e., ""-defined feature flags */
1499				if (!x86_cap_flags[bit])
1500					pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit));
1501				else
1502#endif
1503					pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
1504
1505				setup_clear_cpu_cap(bit);
1506				taint++;
1507			}
1508			/*
1509			 * The assumption is that there are no feature names with only
1510			 * numbers in the name thus go to the next argument.
1511			 */
1512			continue;
1513		}
1514
1515#ifdef CONFIG_X86_FEATURE_NAMES
1516		for (bit = 0; bit < 32 * NCAPINTS; bit++) {
1517			if (!x86_cap_flag(bit))
1518				continue;
1519
1520			if (strcmp(x86_cap_flag(bit), opt))
1521				continue;
1522
1523			pr_cont(" %s", opt);
1524			setup_clear_cpu_cap(bit);
1525			taint++;
1526			found = true;
1527			break;
1528		}
1529
1530		if (!found)
1531			pr_cont(" (unknown: %s)", opt);
1532#endif
1533	}
1534	pr_cont("\n");
1535
1536	if (taint)
1537		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1538}
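/*
 * Editor's usage example: booting with "clearcpuid=smap,123" clears
 * X86_FEATURE_SMAP by name and feature bit 123 by number (when it is
 * below NCAPINTS * 32), prints the cleared flags, and taints the kernel
 * with TAINT_CPU_OUT_OF_SPEC.
 */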
1539
1540/*
1541 * Do minimum CPU detection early.
 1542 * Fields really needed: vendor, cpuid_level, family, model, stepping,
1543 * cache alignment.
1544 * The others are not touched to avoid unwanted side effects.
1545 *
1546 * WARNING: this function is only called on the boot CPU.  Don't add code
1547 * here that is supposed to run on all CPUs.
1548 */
1549static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1550{
1551#ifdef CONFIG_X86_64
1552	c->x86_clflush_size = 64;
1553	c->x86_phys_bits = 36;
1554	c->x86_virt_bits = 48;
1555#else
1556	c->x86_clflush_size = 32;
1557	c->x86_phys_bits = 32;
1558	c->x86_virt_bits = 32;
1559#endif
1560	c->x86_cache_alignment = c->x86_clflush_size;
1561
1562	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1563	c->extended_cpuid_level = 0;
1564
1565	if (!have_cpuid_p())
1566		identify_cpu_without_cpuid(c);
1567
1568	/* cyrix could have cpuid enabled via c_identify()*/
1569	if (have_cpuid_p()) {
1570		cpu_detect(c);
1571		get_cpu_vendor(c);
1572		get_cpu_cap(c);
1573		get_cpu_address_sizes(c);
1574		setup_force_cpu_cap(X86_FEATURE_CPUID);
1575		cpu_parse_early_param();
1576
1577		if (this_cpu->c_early_init)
1578			this_cpu->c_early_init(c);
1579
1580		c->cpu_index = 0;
1581		filter_cpuid_features(c, false);
1582
1583		if (this_cpu->c_bsp_init)
1584			this_cpu->c_bsp_init(c);
1585	} else {
1586		setup_clear_cpu_cap(X86_FEATURE_CPUID);
1587	}
1588
1589	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1590
1591	cpu_set_bug_bits(c);
1592
1593	sld_setup(c);
1594
1595	fpu__init_system(c);
1596
1597	init_sigframe_size();
1598
1599#ifdef CONFIG_X86_32
1600	/*
1601	 * Regardless of whether PCID is enumerated, the SDM says
1602	 * that it can't be enabled in 32-bit mode.
1603	 */
1604	setup_clear_cpu_cap(X86_FEATURE_PCID);
1605#endif
1606
1607	/*
1608	 * Later in the boot process pgtable_l5_enabled() relies on
1609	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
1610	 * enabled by this point we need to clear the feature bit to avoid
1611	 * false-positives at the later stage.
1612	 *
1613	 * pgtable_l5_enabled() can be false here for several reasons:
1614	 *  - 5-level paging is disabled compile-time;
1615	 *  - it's 32-bit kernel;
1616	 *  - machine doesn't support 5-level paging;
1617	 *  - user specified 'no5lvl' in kernel command line.
1618	 */
1619	if (!pgtable_l5_enabled())
1620		setup_clear_cpu_cap(X86_FEATURE_LA57);
1621
1622	detect_nopl();
1623}
1624
1625void __init early_cpu_init(void)
1626{
1627	const struct cpu_dev *const *cdev;
1628	int count = 0;
1629
1630#ifdef CONFIG_PROCESSOR_SELECT
1631	pr_info("KERNEL supported cpus:\n");
1632#endif
1633
1634	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
1635		const struct cpu_dev *cpudev = *cdev;
1636
1637		if (count >= X86_VENDOR_NUM)
1638			break;
1639		cpu_devs[count] = cpudev;
1640		count++;
1641
1642#ifdef CONFIG_PROCESSOR_SELECT
1643		{
1644			unsigned int j;
1645
1646			for (j = 0; j < 2; j++) {
1647				if (!cpudev->c_ident[j])
1648					continue;
1649				pr_info("  %s %s\n", cpudev->c_vendor,
1650					cpudev->c_ident[j]);
1651			}
1652		}
1653#endif
1654	}
1655	early_identify_cpu(&boot_cpu_data);
1656}
1657
1658static bool detect_null_seg_behavior(void)
1659{
1660	/*
1661	 * Empirically, writing zero to a segment selector on AMD does
1662	 * not clear the base, whereas writing zero to a segment
1663	 * selector on Intel does clear the base.  Intel's behavior
1664	 * allows slightly faster context switches in the common case
1665	 * where GS is unused by the prev and next threads.
1666	 *
1667	 * Since neither vendor documents this anywhere that I can see,
1668	 * detect it directly instead of hard-coding the choice by
1669	 * vendor.
1670	 *
1671	 * I've designated AMD's behavior as the "bug" because it's
1672	 * counterintuitive and less friendly.
1673	 */
1674
1675	unsigned long old_base, tmp;
1676	rdmsrl(MSR_FS_BASE, old_base);
1677	wrmsrl(MSR_FS_BASE, 1);
1678	loadsegment(fs, 0);
1679	rdmsrl(MSR_FS_BASE, tmp);
1680	wrmsrl(MSR_FS_BASE, old_base);
1681	return tmp == 0;
1682}
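/*
 * Editor's note: FS rather than GS is probed above, presumably because
 * on 64-bit the kernel's per-CPU base lives in GSBASE and clobbering it
 * here would not be safe, while the FS base can be scribbled on freely
 * at this point.
 */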
1683
1684void check_null_seg_clears_base(struct cpuinfo_x86 *c)
1685{
1686	/* BUG_NULL_SEG is only relevant with 64bit userspace */
1687	if (!IS_ENABLED(CONFIG_X86_64))
1688		return;
1689
1690	/* Zen3 CPUs advertise Null Selector Clears Base in CPUID. */
1691	if (c->extended_cpuid_level >= 0x80000021 &&
1692	    cpuid_eax(0x80000021) & BIT(6))
1693		return;
1694
1695	/*
1696	 * CPUID bit above wasn't set. If this kernel is still running
 1697	 * as a HV guest, then the HV has decided not to advertise
 1698	 * that CPUID bit for whatever reason.  For example, one
 1699	 * member of the migration pool might be vulnerable.  Which
 1700	 * means the bug is present: set the BUG flag and return.
1701	 */
1702	if (cpu_has(c, X86_FEATURE_HYPERVISOR)) {
1703		set_cpu_bug(c, X86_BUG_NULL_SEG);
1704		return;
1705	}
1706
1707	/*
1708	 * Zen2 CPUs also have this behaviour, but no CPUID bit.
1709	 * 0x18 is the respective family for Hygon.
1710	 */
1711	if ((c->x86 == 0x17 || c->x86 == 0x18) &&
1712	    detect_null_seg_behavior())
1713		return;
1714
1715	/* All the remaining ones are affected */
1716	set_cpu_bug(c, X86_BUG_NULL_SEG);
1717}
1718
1719static void generic_identify(struct cpuinfo_x86 *c)
1720{
1721	c->extended_cpuid_level = 0;
1722
1723	if (!have_cpuid_p())
1724		identify_cpu_without_cpuid(c);
1725
1726	/* cyrix could have cpuid enabled via c_identify()*/
1727	if (!have_cpuid_p())
1728		return;
1729
1730	cpu_detect(c);
1731
1732	get_cpu_vendor(c);
1733
1734	get_cpu_cap(c);
1735
1736	get_cpu_address_sizes(c);
1737
1738	if (c->cpuid_level >= 0x00000001) {
1739		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
1740#ifdef CONFIG_X86_32
1741# ifdef CONFIG_SMP
1742		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1743# else
1744		c->apicid = c->initial_apicid;
1745# endif
1746#endif
1747		c->phys_proc_id = c->initial_apicid;
1748	}
1749
1750	get_model_name(c); /* Default name */
1751
1752	/*
1753	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
1754	 * systems that run Linux at CPL > 0 may or may not have the
1755	 * issue, but, even if they have the issue, there's absolutely
1756	 * nothing we can do about it because we can't use the real IRET
1757	 * instruction.
1758	 *
1759	 * NB: For the time being, only 32-bit kernels support
1760	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
1761	 * whether to apply espfix using paravirt hooks.  If any
1762	 * non-paravirt system ever shows up that does *not* have the
1763	 * ESPFIX issue, we can change this.
1764	 */
1765#ifdef CONFIG_X86_32
1766	set_cpu_bug(c, X86_BUG_ESPFIX);
1767#endif
1768}
1769
1770/*
1771 * Validate that ACPI/mptables have the same information about the
1772 * effective APIC id and update the package map.
1773 */
1774static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
1775{
1776#ifdef CONFIG_SMP
1777	unsigned int apicid, cpu = smp_processor_id();
1778
1779	apicid = apic->cpu_present_to_apicid(cpu);
1780
1781	if (apicid != c->apicid) {
1782		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
 1783		       cpu, apicid, c->apicid);
1784	}
1785	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
1786	BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
1787#else
1788	c->logical_proc_id = 0;
1789#endif
1790}
1791
1792/*
1793 * This does the hard work of actually picking apart the CPU stuff...
1794 */
1795static void identify_cpu(struct cpuinfo_x86 *c)
1796{
1797	int i;
1798
1799	c->loops_per_jiffy = loops_per_jiffy;
1800	c->x86_cache_size = 0;
1801	c->x86_vendor = X86_VENDOR_UNKNOWN;
1802	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
1803	c->x86_vendor_id[0] = '\0'; /* Unset */
1804	c->x86_model_id[0] = '\0';  /* Unset */
1805	c->x86_max_cores = 1;
1806	c->x86_coreid_bits = 0;
1807	c->cu_id = 0xff;
1808#ifdef CONFIG_X86_64
1809	c->x86_clflush_size = 64;
1810	c->x86_phys_bits = 36;
1811	c->x86_virt_bits = 48;
1812#else
1813	c->cpuid_level = -1;	/* CPUID not detected */
1814	c->x86_clflush_size = 32;
1815	c->x86_phys_bits = 32;
1816	c->x86_virt_bits = 32;
1817#endif
1818	c->x86_cache_alignment = c->x86_clflush_size;
1819	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1820#ifdef CONFIG_X86_VMX_FEATURE_NAMES
1821	memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
1822#endif
1823
1824	generic_identify(c);
1825
1826	if (this_cpu->c_identify)
1827		this_cpu->c_identify(c);
1828
1829	/* Clear/Set all flags overridden by options, after probe */
1830	apply_forced_caps(c);
1831
1832#ifdef CONFIG_X86_64
1833	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1834#endif
1835
1836	/*
1837	 * Vendor-specific initialization.  In this section we
1838	 * canonicalize the feature flags: if a CPU supports features
1839	 * that CPUID doesn't report, if CPUID claims flags the CPU
1840	 * doesn't actually have, or if there are other bugs, we
1841	 * handle them here.
1842	 *
1843	 * At the end of this section, c->x86_capability better
1844	 * indicate the features this CPU genuinely supports!
1845	 */
1846	if (this_cpu->c_init)
1847		this_cpu->c_init(c);
1848
1849	/* Disable the PN if appropriate */
1850	squash_the_stupid_serial_number(c);
1851
1852	/* Set up SMEP/SMAP/UMIP */
1853	setup_smep(c);
1854	setup_smap(c);
1855	setup_umip(c);
1856
1857	/* Enable FSGSBASE instructions if available. */
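	/*
	 * Setting CR4.FSGSBASE lets user space execute RDFSBASE/WRFSBASE
	 * and RDGSBASE/WRGSBASE directly; HWCAP2_FSGSBASE advertises this
	 * through the AT_HWCAP2 auxiliary vector.
	 */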
1858	if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
1859		cr4_set_bits(X86_CR4_FSGSBASE);
1860		elf_hwcap2 |= HWCAP2_FSGSBASE;
1861	}
1862
1863	/*
1864	 * The vendor-specific functions might have changed features.
1865	 * Now we do "generic changes."
1866	 */
1867
1868	/* Filter out anything that depends on CPUID levels we don't have */
1869	filter_cpuid_features(c, true);
1870
1871	/* If the model name is still unset, do table lookup. */
1872	if (!c->x86_model_id[0]) {
1873		const char *p;
1874		p = table_lookup_model(c);
1875		if (p)
1876			strcpy(c->x86_model_id, p);
1877		else
1878			/* Last resort... */
1879			sprintf(c->x86_model_id, "%02x/%02x",
1880				c->x86, c->x86_model);
1881	}
1882
1883#ifdef CONFIG_X86_64
1884	detect_ht(c);
1885#endif
1886
1887	x86_init_rdrand(c);
1888	setup_pku(c);
1889	setup_cet(c);
1890
1891	/*
1892	 * Clear/Set all flags overridden by options; this needs to be
1893	 * done before the SMP cross-CPU capability AND below.
1894	 */
1895	apply_forced_caps(c);
1896
1897	/*
1898	 * On SMP, boot_cpu_data holds the common feature set between
1899	 * all CPUs; so make sure that we indicate which features are
1900	 * common between the CPUs.  The first time this routine gets
1901	 * executed, c == &boot_cpu_data.
1902	 */
1903	if (c != &boot_cpu_data) {
1904		/* AND the already accumulated flags with these */
1905		for (i = 0; i < NCAPINTS; i++)
1906			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1907
1908		/* OR, i.e. replicate the bug flags */
1909		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
1910			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1911	}
1912
1913	ppin_init(c);
1914
1915	/* Init Machine Check Exception if available. */
1916	mcheck_cpu_init(c);
1917
1918	select_idle_routine(c);
1919
1920#ifdef CONFIG_NUMA
1921	numa_add_cpu(smp_processor_id());
1922#endif
1923}
1924
1925/*
1926 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
1927 * on 32-bit kernels:
1928 */
1929#ifdef CONFIG_X86_32
1930void enable_sep_cpu(void)
1931{
1932	struct tss_struct *tss;
1933	int cpu;
1934
1935	if (!boot_cpu_has(X86_FEATURE_SEP))
1936		return;
1937
1938	cpu = get_cpu();
1939	tss = &per_cpu(cpu_tss_rw, cpu);
1940
1941	/*
1942	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1943	 * see the big comment in struct x86_hw_tss's definition.
1944	 */
1945
1946	tss->x86_tss.ss1 = __KERNEL_CS;
1947	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
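	/*
	 * cpu_entry_stack(cpu) + 1 points one element past the end of the
	 * per-CPU entry stack, i.e. at its top: SYSENTER starts out on the
	 * entry stack, not on the task stack.
	 */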
1948	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
1949	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
1950
1951	put_cpu();
1952}
1953#endif
1954
1955void __init identify_boot_cpu(void)
1956{
1957	identify_cpu(&boot_cpu_data);
1958	if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
1959		pr_info("CET detected: Indirect Branch Tracking enabled\n");
1960#ifdef CONFIG_X86_32
1961	sysenter_setup();
1962	enable_sep_cpu();
1963#endif
1964	cpu_detect_tlb(&boot_cpu_data);
1965	setup_cr_pinning();
1966
1967	tsx_init();
1968}
1969
1970void identify_secondary_cpu(struct cpuinfo_x86 *c)
1971{
1972	BUG_ON(c == &boot_cpu_data);
1973	identify_cpu(c);
1974#ifdef CONFIG_X86_32
1975	enable_sep_cpu();
1976#endif
1977	validate_apic_and_package_id(c);
1978	x86_spec_ctrl_setup_ap();
1979	update_srbds_msr();
1980
1981	tsx_ap_init();
1982}
1983
1984void print_cpu_info(struct cpuinfo_x86 *c)
1985{
1986	const char *vendor = NULL;
1987
1988	if (c->x86_vendor < X86_VENDOR_NUM) {
1989		vendor = this_cpu->c_vendor;
1990	} else {
1991		if (c->cpuid_level >= 0)
1992			vendor = c->x86_vendor_id;
1993	}
1994
1995	if (vendor && !strstr(c->x86_model_id, vendor))
1996		pr_cont("%s ", vendor);
1997
1998	if (c->x86_model_id[0])
1999		pr_cont("%s", c->x86_model_id);
2000	else
2001		pr_cont("%d86", c->x86);
2002
2003	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
2004
2005	if (c->x86_stepping || c->cpuid_level >= 0)
2006		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
2007	else
2008		pr_cont(")\n");
2009}
2010
2011/*
2012 * clearcpuid= was already parsed in cpu_parse_early_param().  This dummy
2013 * function prevents it from becoming an environment variable for init.
2014 */
2015static __init int setup_clearcpuid(char *arg)
2016{
2017	return 1;
2018}
2019__setup("clearcpuid=", setup_clearcpuid);
2020
2021DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
2022	.current_task	= &init_task,
2023	.preempt_count	= INIT_PREEMPT_COUNT,
2024	.top_of_stack	= TOP_OF_INIT_STACK,
2025};
2026EXPORT_PER_CPU_SYMBOL(pcpu_hot);
2027
2028#ifdef CONFIG_X86_64
2029DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
2030		     fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
2031EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
2032
2033static void wrmsrl_cstar(unsigned long val)
2034{
2035	/*
2036	 * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
2037	 * is so far ignored by the CPU, but raises a #VE trap in a TDX
2038	 * guest. Avoid the pointless write on all Intel CPUs.
2039	 */
2040	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2041		wrmsrl(MSR_CSTAR, val);
2042}
2043
2044/* May not be marked __init: used by software suspend */
2045void syscall_init(void)
2046{
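	/*
	 * MSR_STAR layout: bits 47:32 hold the kernel CS selector that
	 * SYSCALL loads (SS becomes that value + 8); bits 63:48 hold the
	 * base selector from which SYSRET derives the user CS and SS.
	 */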
2047	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
2048	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
2049
2050#ifdef CONFIG_IA32_EMULATION
2051	wrmsrl_cstar((unsigned long)entry_SYSCALL_compat);
2052	/*
2053	 * This only works on Intel CPUs.
2054	 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
2055	 * This does not cause SYSENTER to jump to the wrong location, because
2056	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
2057	 */
2058	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
2059	wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
2060		    (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
2061	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
2062#else
2063	wrmsrl_cstar((unsigned long)ignore_sysret);
2064	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
2065	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
2066	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
2067#endif
2068
2069	/*
2070	 * Flags to clear on syscall; clear as much as possible
2071	 * to minimize interference between user space and the kernel.
2072	 */
2073	wrmsrl(MSR_SYSCALL_MASK,
2074	       X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
2075	       X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
2076	       X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
2077	       X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
2078	       X86_EFLAGS_AC|X86_EFLAGS_ID);
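	/*
	 * In particular, clearing IF means the kernel is entered with
	 * interrupts disabled, and clearing TF keeps a user-mode
	 * single-step trap from following SYSCALL into the kernel.
	 */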
2079}
2080
2081#else	/* CONFIG_X86_64 */
2082
2083#ifdef CONFIG_STACKPROTECTOR
2084DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
2085EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
2086#endif
2087
2088#endif	/* CONFIG_X86_64 */
2089
2090/*
2091 * Clear all 6 debug registers (DR0-DR3, DR6 and DR7):
2092 */
2093static void clear_all_debug_regs(void)
2094{
2095	int i;
2096
2097	for (i = 0; i < 8; i++) {
2098		/* Ignore DR4 and DR5 (aliases of DR6/DR7) */
2099		if ((i == 4) || (i == 5))
2100			continue;
2101
2102		set_debugreg(0, i);
2103	}
2104}
2105
2106#ifdef CONFIG_KGDB
2107/*
2108 * Restore debug regs if using kgdbwait and you have a kernel debugger
2109 * connection established.
2110 */
2111static void dbg_restore_debug_regs(void)
2112{
2113	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
2114		arch_kgdb_ops.correct_hw_break();
2115}
2116#else /* ! CONFIG_KGDB */
2117#define dbg_restore_debug_regs()
2118#endif /* ! CONFIG_KGDB */
2119
2120static void wait_for_master_cpu(int cpu)
2121{
2122#ifdef CONFIG_SMP
2123	/*
2124	 * Wait for an ACK from the master CPU before continuing
2125	 * with AP initialization.
2126	 */
2127	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
2128	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
2129		cpu_relax();
2130#endif
2131}
2132
2133#ifdef CONFIG_X86_64
2134static inline void setup_getcpu(int cpu)
2135{
2136	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
2137	struct desc_struct d = { };
2138
2139	if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
2140		wrmsr(MSR_TSC_AUX, cpudata, 0);
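	/*
	 * RDTSCP and RDPID return MSR_TSC_AUX, so user space can read
	 * the encoded CPU/node value without a system call on CPUs that
	 * support either instruction.
	 */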
2141
2142	/* Store CPU and node number in limit. */
2143	d.limit0 = cpudata;
2144	d.limit1 = cpudata >> 16;
2145
2146	d.type = 5;		/* RO data, expand down, accessed */
2147	d.dpl = 3;		/* Visible to user code */
2148	d.s = 1;		/* Not a system segment */
2149	d.p = 1;		/* Present */
2150	d.d = 1;		/* 32-bit */
2151
2152	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
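	/*
	 * User space (e.g. the vDSO's getcpu) reads the value back with
	 * LSL on the CPUNODE segment selector: LSL returns a segment
	 * limit without requiring any privilege.
	 */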
2153}
2154
2155static inline void ucode_cpu_init(int cpu)
2156{
2157	if (cpu)
2158		load_ucode_ap();
2159}
2160
2161static inline void tss_setup_ist(struct tss_struct *tss)
2162{
2163	/* Set up the per-CPU TSS IST stacks */
2164	tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
2165	tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
2166	tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
2167	tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
2168	/* Only mapped when SEV-ES is active */
2169	tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
2170}
2171
2172#else /* CONFIG_X86_64 */
2173
2174static inline void setup_getcpu(int cpu) { }
2175
2176static inline void ucode_cpu_init(int cpu)
2177{
2178	show_ucode_info_early();
2179}
2180
2181static inline void tss_setup_ist(struct tss_struct *tss) { }
2182
2183#endif /* !CONFIG_X86_64 */
2184
2185static inline void tss_setup_io_bitmap(struct tss_struct *tss)
2186{
2187	tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
2188
2189#ifdef CONFIG_X86_IOPL_IOPERM
2190	tss->io_bitmap.prev_max = 0;
2191	tss->io_bitmap.prev_sequence = 0;
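	/*
	 * A set bit in the TSS I/O bitmap denies access to the
	 * corresponding port, so 0xff everywhere leaves all ports
	 * blocked by default.
	 */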
2192	memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
2193	/*
2194	 * Invalidate the extra array entry past the end of the
2195	 * all-permission bitmap as required by the hardware.
2196	 */
2197	tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
2198#endif
2199}
2200
2201/*
2202 * Setup everything needed to handle exceptions from the IDT, including the IST
2203 * exceptions which use paranoid_entry().
2204 */
2205void cpu_init_exception_handling(void)
2206{
2207	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
2208	int cpu = raw_smp_processor_id();
2209
2210	/* paranoid_entry() gets the CPU number from the GDT */
2211	setup_getcpu(cpu);
2212
2213	/* IST vectors need TSS to be set up. */
2214	tss_setup_ist(tss);
2215	tss_setup_io_bitmap(tss);
2216	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
2217
2218	load_TR_desc();
2219
2220	/* The GHCB (Guest-Hypervisor Communication Block) needs to be set up to handle #VC. */
2221	setup_ghcb();
2222
2223	/* Finally load the IDT */
2224	load_current_idt();
2225}
2226
2227/*
2228 * cpu_init() initializes state that is per-CPU. Some data is already
2229 * initialized (naturally) in the bootstrap process, such as the GDT.  We
2230 * reload it nevertheless; this function acts as a 'CPU state barrier':
2231 * nothing should get across.
2232 */
2233void cpu_init(void)
2234{
2235	struct task_struct *cur = current;
2236	int cpu = raw_smp_processor_id();
2237
2238	wait_for_master_cpu(cpu);
2239
2240	ucode_cpu_init(cpu);
2241
2242#ifdef CONFIG_NUMA
2243	if (this_cpu_read(numa_node) == 0 &&
2244	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
2245		set_numa_node(early_cpu_to_node(cpu));
2246#endif
2247	pr_debug("Initializing CPU#%d\n", cpu);
2248
2249	if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
2250	    boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
2251		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
2252
2253	if (IS_ENABLED(CONFIG_X86_64)) {
2254		loadsegment(fs, 0);
2255		memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
2256		syscall_init();
2257
2258		wrmsrl(MSR_FS_BASE, 0);
2259		wrmsrl(MSR_KERNEL_GS_BASE, 0);
2260		barrier();
2261
2262		x2apic_setup();
2263	}
2264
2265	mmgrab(&init_mm);
2266	cur->active_mm = &init_mm;
2267	BUG_ON(cur->mm);
2268	initialize_tlbstate_and_flush();
2269	enter_lazy_tlb(&init_mm, cur);
2270
2271	/*
2272	 * sp0 points to the entry trampoline stack regardless of what task
2273	 * is running.
2274	 */
2275	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
2276
2277	load_mm_ldt(&init_mm);
2278
2279	clear_all_debug_regs();
2280	dbg_restore_debug_regs();
2281
2282	doublefault_init_cpu_tss();
2283
2284	fpu__init_cpu();
2285
2286	if (is_uv_system())
2287		uv_cpu_init();
2288
2289	load_fixmap_gdt(cpu);
2290}
2291
2292#ifdef CONFIG_SMP
2293void cpu_init_secondary(void)
2294{
2295	/*
2296	 * Relies on the BP having set-up the IDT tables, which are loaded
2297	 * on this CPU in cpu_init_exception_handling().
2298	 */
2299	cpu_init_exception_handling();
2300	cpu_init();
2301}
2302#endif
2303
2304#ifdef CONFIG_MICROCODE_LATE_LOADING
2305/*
2306 * The microcode loader calls this upon a late microcode load to recheck
2307 * features, but only when the microcode has actually been updated. The
2308 * caller holds microcode_mutex and the CPU hotplug lock.
2309 */
2310void microcode_check(void)
2311{
2312	struct cpuinfo_x86 info;
2313
2314	perf_check_microcode();
2315
2316	/* Reload CPUID max function as it might've changed. */
2317	info.cpuid_level = cpuid_eax(0);
2318
2319	/*
2320	 * Copy all capability leaves to pick up the synthetic ones so that
2321	 * memcmp() below doesn't fail on that. The ones coming from CPUID will
2322	 * get overwritten in get_cpu_cap().
2323	 */
2324	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
2325
2326	get_cpu_cap(&info);
2327
2328	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
2329		return;
2330
2331	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
2332	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
2333}
2334#endif
2335
2336/*
2337 * Invoked from core CPU hotplug code after hotplug operations
2338 */
2339void arch_smt_update(void)
2340{
2341	/* Handle the speculative execution misfeatures */
2342	cpu_bugs_smt_update();
2343	/* Check whether IPI broadcasting can be enabled */
2344	apic_smt_update();
2345}