v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3
   4#include <linux/string.h>
   5#include <linux/bitops.h>
   6#include <linux/smp.h>
   7#include <linux/sched.h>
   8#include <linux/sched/clock.h>
   9#include <linux/thread_info.h>
  10#include <linux/init.h>
  11#include <linux/uaccess.h>
  12
  13#include <asm/cpufeature.h>
  14#include <asm/pgtable.h>
  15#include <asm/msr.h>
  16#include <asm/bugs.h>
  17#include <asm/cpu.h>
  18#include <asm/intel-family.h>
  19#include <asm/microcode_intel.h>
  20#include <asm/hwcap2.h>
  21#include <asm/elf.h>
  22
  23#ifdef CONFIG_X86_64
  24#include <linux/topology.h>
  25#endif
  26
  27#include "cpu.h"
  28
  29#ifdef CONFIG_X86_LOCAL_APIC
  30#include <asm/mpspec.h>
  31#include <asm/apic.h>
  32#endif
  33
  34/*
  35 * Just in case our CPU detection goes bad, or you have a weird system,
  36 * allow a way to override the automatic disabling of MPX.
  37 */
  38static int forcempx;
  39
  40static int __init forcempx_setup(char *__unused)
  41{
  42	forcempx = 1;
  43
  44	return 1;
  45}
  46__setup("intel-skd-046-workaround=disable", forcempx_setup);
  47
  48void check_mpx_erratum(struct cpuinfo_x86 *c)
  49{
  50	if (forcempx)
  51		return;
  52	/*
  53	 * Turn off the MPX feature on CPUs where SMEP is not
  54	 * available or disabled.
  55	 *
  56	 * Works around Intel Erratum SKD046: "Branch Instructions
  57	 * May Initialize MPX Bound Registers Incorrectly".
  58	 *
  59	 * This might falsely disable MPX on systems without
  60	 * SMEP, like Atom processors without SMEP.  But there
  61	 * is no such hardware known at the moment.
  62	 */
  63	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
  64		setup_clear_cpu_cap(X86_FEATURE_MPX);
  65		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
  66	}
  67}
  68
  69/*
  70 * Processors which have self-snooping capability can handle conflicting
  71 * memory types across CPUs by snooping their own cache. However, there
  72 * exist CPU models in which conflicting memory types still lead to
  73 * unpredictable behavior, machine check errors, or hangs. Clear this
  74 * feature to prevent its use on machines with known errata.
  75 */
  76static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
  77{
  78	switch (c->x86_model) {
  79	case INTEL_FAM6_CORE_YONAH:
  80	case INTEL_FAM6_CORE2_MEROM:
  81	case INTEL_FAM6_CORE2_MEROM_L:
  82	case INTEL_FAM6_CORE2_PENRYN:
  83	case INTEL_FAM6_CORE2_DUNNINGTON:
  84	case INTEL_FAM6_NEHALEM:
  85	case INTEL_FAM6_NEHALEM_G:
  86	case INTEL_FAM6_NEHALEM_EP:
  87	case INTEL_FAM6_NEHALEM_EX:
  88	case INTEL_FAM6_WESTMERE:
  89	case INTEL_FAM6_WESTMERE_EP:
  90	case INTEL_FAM6_SANDYBRIDGE:
  91		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
  92	}
  93}
  94
  95static bool ring3mwait_disabled __read_mostly;
  96
  97static int __init ring3mwait_disable(char *__unused)
  98{
  99	ring3mwait_disabled = true;
 100	return 0;
 101}
 102__setup("ring3mwait=disable", ring3mwait_disable);
 103
 104static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
 105{
 106	/*
 107	 * The ring 3 MONITOR/MWAIT feature is not enumerated in CPUID, so
 108	 * it can only be detected by comparing CPU family and model.
 109	 */
 110	if (c->x86 != 6)
 111		return;
 112	switch (c->x86_model) {
 113	case INTEL_FAM6_XEON_PHI_KNL:
 114	case INTEL_FAM6_XEON_PHI_KNM:
 115		break;
 116	default:
 117		return;
 118	}
 119
 120	if (ring3mwait_disabled)
 121		return;
 122
 123	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
 124	this_cpu_or(msr_misc_features_shadow,
 125		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);
 126
 127	if (c == &boot_cpu_data)
 128		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
 129}
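
/*
 * Editorial sketch, not part of the kernel file: userspace discovers the
 * ring 3 MONITOR/MWAIT capability set up above through the ELF auxiliary
 * vector rather than CPUID. This assumes an x86 glibc with getauxval()
 * and that HWCAP2_RING3MWAIT is bit 0 of AT_HWCAP2, as defined in
 * arch/x86/include/asm/hwcap2.h.
 */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	/* HWCAP2_RING3MWAIT == (1 << 0) */
	printf("ring 3 MONITOR/MWAIT %savailable\n",
	       (hwcap2 & 1) ? "" : "not ");
	return 0;
}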
 130
 131/*
 132 * Early microcode releases for the Spectre v2 mitigation were broken.
 133 * Information taken from:
 134 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 135 * - https://kb.vmware.com/s/article/52345
 136 * - Microcode revisions observed in the wild
 137 * - Release note from 20180108 microcode release
 138 */
 139struct sku_microcode {
 140	u8 model;
 141	u8 stepping;
 142	u32 microcode;
 143};
 144static const struct sku_microcode spectre_bad_microcodes[] = {
 145	{ INTEL_FAM6_KABYLAKE,		0x0B,	0x80 },
 146	{ INTEL_FAM6_KABYLAKE,		0x0A,	0x80 },
 147	{ INTEL_FAM6_KABYLAKE,		0x09,	0x80 },
 148	{ INTEL_FAM6_KABYLAKE_L,	0x0A,	0x80 },
 149	{ INTEL_FAM6_KABYLAKE_L,	0x09,	0x80 },
 150	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
 151	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
 152	{ INTEL_FAM6_BROADWELL,		0x04,	0x28 },
 153	{ INTEL_FAM6_BROADWELL_G,	0x01,	0x1b },
 154	{ INTEL_FAM6_BROADWELL_D,	0x02,	0x14 },
 155	{ INTEL_FAM6_BROADWELL_D,	0x03,	0x07000011 },
 156	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
 157	{ INTEL_FAM6_HASWELL_L,		0x01,	0x21 },
 158	{ INTEL_FAM6_HASWELL_G,		0x01,	0x18 },
 159	{ INTEL_FAM6_HASWELL,		0x03,	0x23 },
 160	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
 161	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
 162	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
 163	/* Observed in the wild */
 164	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
 165	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
 166};
 167
 168static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 169{
 170	int i;
 171
 172	/*
 173	 * We know that hypervisors lie to us about the microcode version, so
 174	 * we may as well hope that the host is running the correct version.
 175	 */
 176	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
 177		return false;
 178
 179	if (c->x86 != 6)
 180		return false;
 181
 182	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
 183		if (c->x86_model == spectre_bad_microcodes[i].model &&
 184		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
 185			return (c->microcode <= spectre_bad_microcodes[i].microcode);
 186	}
 187	return false;
 188}
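
/*
 * Editorial sketch, not part of the kernel file: how the blacklist above
 * matches a concrete part. A Kabylake desktop CPU (model 0x9e, stepping
 * 0x09) running microcode revision 0x80 or older matches the
 * { INTEL_FAM6_KABYLAKE, 0x09, 0x80 } entry, because the comparison on
 * the revision is "<=". The revisions below are made-up examples.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sku { uint8_t model, stepping; uint32_t microcode; };

static const struct sku bad[] = {
	{ 0x9e /* INTEL_FAM6_KABYLAKE */, 0x09, 0x80 },
};

static bool is_bad(uint8_t model, uint8_t stepping, uint32_t ucode)
{
	size_t i;

	for (i = 0; i < sizeof(bad) / sizeof(bad[0]); i++)
		if (model == bad[i].model && stepping == bad[i].stepping)
			return ucode <= bad[i].microcode;
	return false;
}

int main(void)
{
	printf("rev 0x80: %d, rev 0x84: %d\n",
	       is_bad(0x9e, 0x09, 0x80), is_bad(0x9e, 0x09, 0x84));
	return 0;	/* prints "rev 0x80: 1, rev 0x84: 0" */
}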
 189
 190static void early_init_intel(struct cpuinfo_x86 *c)
 191{
 192	u64 misc_enable;
 193
 194	/* Unmask CPUID levels if masked: */
 195	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
 196		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
 197				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
 198			c->cpuid_level = cpuid_eax(0);
 199			get_cpu_cap(c);
 200		}
 201	}
 202
 203	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 204		(c->x86 == 0x6 && c->x86_model >= 0x0e))
 205		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 206
 207	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
 208		c->microcode = intel_get_microcode_revision();
 209
 210	/* Now if any of them are set, check the blacklist and clear the lot */
 211	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
 212	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
 213	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
 214	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
 215		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
 216		setup_clear_cpu_cap(X86_FEATURE_IBRS);
 217		setup_clear_cpu_cap(X86_FEATURE_IBPB);
 218		setup_clear_cpu_cap(X86_FEATURE_STIBP);
 219		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
 220		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
 221		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
 222		setup_clear_cpu_cap(X86_FEATURE_SSBD);
 223		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
 224	}
 225
 226	/*
 227	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
 228	 *
 229	 * A race condition between speculative fetches and invalidating
 230	 * a large page.  This is worked around in microcode, but we
 231	 * need the microcode to have already been loaded... so if it is
 232	 * not, recommend a BIOS update and disable large pages.
 233	 */
 234	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
 235	    c->microcode < 0x20e) {
 236		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
 237		clear_cpu_cap(c, X86_FEATURE_PSE);
 238	}
 239
 240#ifdef CONFIG_X86_64
 241	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 242#else
 243	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
 244	if (c->x86 == 15 && c->x86_cache_alignment == 64)
 245		c->x86_cache_alignment = 128;
 246#endif
 247
 248	/* CPUID workaround for 0F33/0F34 CPU */
 249	if (c->x86 == 0xF && c->x86_model == 0x3
 250	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
 251		c->x86_phys_bits = 36;
 252
 253	/*
 254	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
 255	 * with P/T states and does not stop in deep C-states.
 256	 *
 257	 * It is also reliable across cores and sockets. (but not across
 258	 * cabinets - we turn it off in that case explicitly.)
 259	 */
 260	if (c->x86_power & (1 << 8)) {
 261		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 262		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 263	}
 264
 265	/* Penwell and Cloverview have a TSC that keeps running in S3 */
 266	if (c->x86 == 6) {
 267		switch (c->x86_model) {
 268		case INTEL_FAM6_ATOM_SALTWELL_MID:
 269		case INTEL_FAM6_ATOM_SALTWELL_TABLET:
 270		case INTEL_FAM6_ATOM_SILVERMONT_MID:
 271		case INTEL_FAM6_ATOM_AIRMONT_NP:
 272			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
 273			break;
 274		default:
 275			break;
 276		}
 277	}
 278
 279	/*
 280	 * There is a known erratum on Pentium III and Core Solo
 281	 * and Core Duo CPUs.
 282	 * " Page with PAT set to WC while associated MTRR is UC
 283	 *   may consolidate to UC "
 284	 * Because of this erratum, it is better to stick with
 285	 * setting WC in MTRR rather than using PAT on these CPUs.
 286	 *
 287	 * Enable PAT WC only on P4, Core 2 or later CPUs.
 288	 */
 289	if (c->x86 == 6 && c->x86_model < 15)
 290		clear_cpu_cap(c, X86_FEATURE_PAT);
 291
 292	/*
 293	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
 294	 * clear the fast string and enhanced fast string CPU capabilities.
 295	 */
 296	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
 297		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
 298		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
 299			pr_info("Disabled fast string operations\n");
 300			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
 301			setup_clear_cpu_cap(X86_FEATURE_ERMS);
 302		}
 303	}
 304
 305	/*
 306	 * Intel Quark Core DevMan_001.pdf section 6.4.11
 307	 * "The operating system also is required to invalidate (i.e., flush)
 308	 *  the TLB when any changes are made to any of the page table entries.
 309	 *  The operating system must reload CR3 to cause the TLB to be flushed"
 310	 *
 311	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
 312	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
 313	 * to be modified.
 314	 */
 315	if (c->x86 == 5 && c->x86_model == 9) {
 316		pr_info("Disabling PGE capability bit\n");
 317		setup_clear_cpu_cap(X86_FEATURE_PGE);
 318	}
 319
 320	if (c->cpuid_level >= 0x00000001) {
 321		u32 eax, ebx, ecx, edx;
 322
 323		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
 324		/*
 325		 * If HTT (EDX[28]) is set, EBX[16:23] contain the number of
 326		 * apicids which are reserved per package. Store the resulting
 327		 * shift value for the package management code.
 328		 */
 329		if (edx & (1U << 28))
 330			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
 331	}
 332
 333	check_mpx_erratum(c);
 334	check_memory_type_self_snoop_errata(c);
 335
 336	/*
 337	 * Get the number of SMT siblings early from the extended topology
 338	 * leaf, if available. Otherwise try the legacy SMT detection.
 339	 */
 340	if (detect_extended_topology_early(c) < 0)
 341		detect_ht_early(c);
 342}
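
/*
 * Editorial sketch, not part of the kernel file: how the c->x86,
 * c->x86_model and c->x86_stepping fields tested throughout this file
 * are derived from CPUID leaf 1 EAX. The extended model field is folded
 * in for families 6 and 15, which is how model numbers above 0xf (e.g.
 * 0x9e for Kabylake) arise. The sample signature is used only for
 * demonstration.
 */
#include <stdint.h>
#include <stdio.h>

static void decode_sig(uint32_t eax)
{
	uint32_t stepping    = eax & 0xf;
	uint32_t model       = (eax >> 4) & 0xf;
	uint32_t base_family = (eax >> 8) & 0xf;
	uint32_t family      = base_family;

	if (base_family == 0xf)
		family += (eax >> 20) & 0xff;		/* extended family */
	if (base_family == 0x6 || base_family == 0xf)
		model |= ((eax >> 16) & 0xf) << 4;	/* extended model */

	printf("family %u, model 0x%x, stepping %u\n",
	       family, model, stepping);
}

int main(void)
{
	decode_sig(0x000906e9);	/* -> family 6, model 0x9e, stepping 9 */
	return 0;
}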
 343
 344#ifdef CONFIG_X86_32
 345/*
 346 *	Early probe support logic for ppro memory erratum #50
 347 *
 348 *	This is called before we do cpu ident work
 349 */
 350
 351int ppro_with_ram_bug(void)
 352{
 353	/* Uses data from early_cpu_detect now */
 354	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
 355	    boot_cpu_data.x86 == 6 &&
 356	    boot_cpu_data.x86_model == 1 &&
 357	    boot_cpu_data.x86_stepping < 8) {
 358		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
 359		return 1;
 360	}
 361	return 0;
 362}
 363
 364static void intel_smp_check(struct cpuinfo_x86 *c)
 365{
 366	/* Are we being called from identify_secondary_cpu()? */
 367	if (!c->cpu_index)
 368		return;
 369
 370	/*
 371	 * Mask B, Pentium, but not Pentium MMX
 372	 */
 373	if (c->x86 == 5 &&
 374	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
 375	    c->x86_model <= 3) {
 376		/*
 377		 * Remember we have B step Pentia with bugs
 378		 */
 379		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
 380				    "with B stepping processors.\n");
 381	}
 382}
 383
 384static int forcepae;
 385static int __init forcepae_setup(char *__unused)
 386{
 387	forcepae = 1;
 388	return 1;
 389}
 390__setup("forcepae", forcepae_setup);
 391
 392static void intel_workarounds(struct cpuinfo_x86 *c)
 393{
 394#ifdef CONFIG_X86_F00F_BUG
 395	/*
 396	 * All models of Pentium and Pentium with MMX technology CPUs
 397	 * have the F0 0F bug, which lets nonprivileged users lock up the
 398	 * system. Announce that the fault handler will be checking for it.
 399	 * The Quark is also family 5, but does not have the same bug.
 400	 */
 401	clear_cpu_bug(c, X86_BUG_F00F);
 402	if (c->x86 == 5 && c->x86_model < 9) {
 403		static int f00f_workaround_enabled;
 404
 405		set_cpu_bug(c, X86_BUG_F00F);
 406		if (!f00f_workaround_enabled) {
 407			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
 408			f00f_workaround_enabled = 1;
 409		}
 410	}
 411#endif
 412
 413	/*
 414	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
 415	 * model 3, stepping 3
 416	 */
 417	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
 418		clear_cpu_cap(c, X86_FEATURE_SEP);
 419
 420	/*
 421	 * PAE CPUID issue: many Pentium M report no PAE but may have a
 422	 * functionally usable PAE implementation.
 423	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
 424	 */
 425	if (forcepae) {
 426		pr_warn("PAE forced!\n");
 427		set_cpu_cap(c, X86_FEATURE_PAE);
 428		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
 429	}
 430
 431	/*
 432	 * P4 Xeon erratum 037 workaround.
 433	 * Hardware prefetcher may cause stale data to be loaded into the cache.
 434	 */
 435	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
 436		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
 437				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
 438			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
 439			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
 440		}
 441	}
 442
 443	/*
 444	 * See if we have a good local APIC by checking for buggy Pentia,
 445	 * i.e. all B steppings and the C2 stepping of P54C when using their
 446	 * integrated APIC (see 11AP erratum in "Pentium Processor
 447	 * Specification Update").
 448	 */
 449	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
 450	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
 451		set_cpu_bug(c, X86_BUG_11AP);
 452
 453
 454#ifdef CONFIG_X86_INTEL_USERCOPY
 455	/*
 456	 * Set up the preferred alignment for movsl bulk memory moves
 457	 */
 458	switch (c->x86) {
 459	case 4:		/* 486: untested */
 460		break;
 461	case 5:		/* Old Pentia: untested */
 462		break;
 463	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
 464		movsl_mask.mask = 7;
 465		break;
 466	case 15:	/* P4 is OK down to 8-byte alignment */
 467		movsl_mask.mask = 7;
 468		break;
 469	}
 470#endif
 471
 472	intel_smp_check(c);
 473}
 474#else
 475static void intel_workarounds(struct cpuinfo_x86 *c)
 476{
 477}
 478#endif
 479
 480static void srat_detect_node(struct cpuinfo_x86 *c)
 481{
 482#ifdef CONFIG_NUMA
 483	unsigned node;
 484	int cpu = smp_processor_id();
 485
 486	/* Don't do the funky fallback heuristics the AMD version employs
 487	   for now. */
 488	node = numa_cpu_node(cpu);
 489	if (node == NUMA_NO_NODE || !node_online(node)) {
 490		/* reuse the value from init_cpu_to_node() */
 491		node = cpu_to_node(cpu);
 492	}
 493	numa_set_node(cpu, node);
 494#endif
 495}
 496
 497static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
 498{
 499	/* Intel VMX MSR indicated features */
 500#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
 501#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
 502#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
 503#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
 504#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
 505#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020
 506#define x86_VMX_FEATURE_EPT_CAP_AD		0x00200000
 507
 508	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
 509	u32 msr_vpid_cap, msr_ept_cap;
 510
 511	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
 512	clear_cpu_cap(c, X86_FEATURE_VNMI);
 513	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
 514	clear_cpu_cap(c, X86_FEATURE_EPT);
 515	clear_cpu_cap(c, X86_FEATURE_VPID);
 516	clear_cpu_cap(c, X86_FEATURE_EPT_AD);
 517
 518	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
 519	msr_ctl = vmx_msr_high | vmx_msr_low;
 520	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
 521		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
 522	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
 523		set_cpu_cap(c, X86_FEATURE_VNMI);
 524	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
 525		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
 526		      vmx_msr_low, vmx_msr_high);
 527		msr_ctl2 = vmx_msr_high | vmx_msr_low;
 528		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
 529		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
 530			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
 531		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) {
 532			set_cpu_cap(c, X86_FEATURE_EPT);
 533			rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
 534			      msr_ept_cap, msr_vpid_cap);
 535			if (msr_ept_cap & x86_VMX_FEATURE_EPT_CAP_AD)
 536				set_cpu_cap(c, X86_FEATURE_EPT_AD);
 537		}
 538		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
 539			set_cpu_cap(c, X86_FEATURE_VPID);
 540	}
 541}
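
/*
 * Editorial sketch, not part of the kernel file: the VMX capability
 * MSRs read above carry "allowed-0" settings in the low 32 bits and
 * "allowed-1" settings in the high 32 bits. ORing the two halves, as
 * detect_vmx_virtcap() does, answers "can this control ever be 1?":
 * either it may be set (allowed-1) or it is already forced on
 * (allowed-0). The MSR value below is fabricated for demonstration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PROC_CTLS_TPR_SHADOW	0x00200000u

static bool ctl_can_be_set(uint64_t msr, uint32_t bit)
{
	uint32_t lo = (uint32_t)msr;		/* allowed-0 settings */
	uint32_t hi = (uint32_t)(msr >> 32);	/* allowed-1 settings */

	return ((hi | lo) & bit) != 0;
}

int main(void)
{
	/* fabricated: TPR shadow is allowed to be 1, nothing forced on */
	uint64_t procbased_ctls = (uint64_t)PROC_CTLS_TPR_SHADOW << 32;

	printf("TPR shadow usable: %d\n",
	       ctl_can_be_set(procbased_ctls, PROC_CTLS_TPR_SHADOW));
	return 0;
}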
 542
 543#define MSR_IA32_TME_ACTIVATE		0x982
 544
 545/* Helpers to access TME_ACTIVATE MSR */
 546#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
 547#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)
 548
 549#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
 550#define TME_ACTIVATE_POLICY_AES_XTS_128	0
 551
 552#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */
 553
 554#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
 555#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1
 556
 557/* Values for mktme_status (SW only construct) */
 558#define MKTME_ENABLED			0
 559#define MKTME_DISABLED			1
 560#define MKTME_UNINITIALIZED		2
 561static int mktme_status = MKTME_UNINITIALIZED;
 562
 563static void detect_tme(struct cpuinfo_x86 *c)
 564{
 565	u64 tme_activate, tme_policy, tme_crypto_algs;
 566	int keyid_bits = 0, nr_keyids = 0;
 567	static u64 tme_activate_cpu0 = 0;
 568
 569	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
 570
 571	if (mktme_status != MKTME_UNINITIALIZED) {
 572		if (tme_activate != tme_activate_cpu0) {
 573			/* Broken BIOS? */
 574			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
 575			pr_err_once("x86/tme: MKTME is not usable\n");
 576			mktme_status = MKTME_DISABLED;
 577
 578			/* Proceed. We may need to exclude bits from x86_phys_bits. */
 579		}
 580	} else {
 581		tme_activate_cpu0 = tme_activate;
 582	}
 583
 584	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
 585		pr_info_once("x86/tme: not enabled by BIOS\n");
 586		mktme_status = MKTME_DISABLED;
 587		return;
 588	}
 589
 590	if (mktme_status != MKTME_UNINITIALIZED)
 591		goto detect_keyid_bits;
 592
 593	pr_info("x86/tme: enabled by BIOS\n");
 594
 595	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
 596	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
 597		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
 598
 599	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
 600	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
 601		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
 602				tme_crypto_algs);
 603		mktme_status = MKTME_DISABLED;
 604	}
 605detect_keyid_bits:
 606	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
 607	nr_keyids = (1UL << keyid_bits) - 1;
 608	if (nr_keyids) {
 609		pr_info_once("x86/mktme: enabled by BIOS\n");
 610		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
 611	} else {
 612		pr_info_once("x86/mktme: disabled by BIOS\n");
 613	}
 614
 615	if (mktme_status == MKTME_UNINITIALIZED) {
 616		/* MKTME is usable */
 617		mktme_status = MKTME_ENABLED;
 618	}
 619
 620	/*
 621	 * KeyID bits effectively lower the number of physical address
 622	 * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
 623	 */
 624	c->x86_phys_bits -= keyid_bits;
 625}
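
/*
 * Editorial sketch, not part of the kernel file: decoding a fabricated
 * TME_ACTIVATE value with the same bit layout the helper macros above
 * encode. The sample is locked and enabled, policy 0 (AES-XTS-128),
 * reserves 6 KeyID bits, and advertises AES-XTS-128 in the algorithm
 * bitmask, so 63 KeyIDs remain after KeyID 0.
 */
#include <stdint.h>
#include <stdio.h>

#define ACT_LOCKED(x)		((x) & 0x1)
#define ACT_ENABLED(x)		(((x) >> 1) & 0x1)
#define ACT_POLICY(x)		(((x) >> 4) & 0xf)	/* bits 7:4 */
#define ACT_KEYID_BITS(x)	(((x) >> 32) & 0xf)	/* bits 35:32 */
#define ACT_ALGS(x)		(((x) >> 48) & 0xffff)	/* bits 63:48 */

int main(void)
{
	uint64_t v = 0x0001000600000003ull;	/* made-up MSR contents */
	unsigned int keyid_bits = (unsigned int)ACT_KEYID_BITS(v);

	printf("locked=%llu enabled=%llu policy=%llu algs=%#llx nr_keyids=%u\n",
	       (unsigned long long)ACT_LOCKED(v),
	       (unsigned long long)ACT_ENABLED(v),
	       (unsigned long long)ACT_POLICY(v),
	       (unsigned long long)ACT_ALGS(v),
	       (1u << keyid_bits) - 1);
	return 0;
}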
 626
 627static void init_cpuid_fault(struct cpuinfo_x86 *c)
 628{
 629	u64 msr;
 630
 631	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
 632		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
 633			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
 634	}
 635}
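
/*
 * Editorial sketch, not part of the kernel file: what the
 * X86_FEATURE_CPUID_FAULT capability probed above enables. Since Linux
 * 4.12 a process can ask that the CPUID instruction raise SIGSEGV via
 * arch_prctl(ARCH_SET_CPUID, 0); the call fails when the hardware
 * capability is absent. Assumes kernel headers providing ARCH_SET_CPUID.
 */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/prctl.h>		/* ARCH_GET_CPUID, ARCH_SET_CPUID */

int main(void)
{
	/* 0 = make CPUID fault; arch_prctl(ARCH_SET_CPUID, 1) undoes it */
	if (syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0) != 0) {
		perror("arch_prctl(ARCH_SET_CPUID)");
		return 1;
	}
	printf("CPUID now faults in this process\n");
	return 0;
}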
 636
 637static void init_intel_misc_features(struct cpuinfo_x86 *c)
 638{
 639	u64 msr;
 640
 641	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
 642		return;
 643
 644	/* Clear all MISC features */
 645	this_cpu_write(msr_misc_features_shadow, 0);
 646
 647	/* Check features and update capabilities and shadow control bits */
 648	init_cpuid_fault(c);
 649	probe_xeon_phi_r3mwait(c);
 650
 651	msr = this_cpu_read(msr_misc_features_shadow);
 652	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
 653}
 654
 655static void init_intel(struct cpuinfo_x86 *c)
 656{
 657	early_init_intel(c);
 658
 659	intel_workarounds(c);
 660
 661	/*
 662	 * Detect the extended topology information if available. This
 663	 * will reinitialise the initial_apicid which will be used
 664	 * in init_intel_cacheinfo()
 665	 */
 666	detect_extended_topology(c);
 667
 668	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
 669		/*
 670		 * let's use the legacy CPUID leaves 0x1 and 0x4 for topology
 671		 * detection.
 672		 */
 673		detect_num_cpu_cores(c);
 674#ifdef CONFIG_X86_32
 675		detect_ht(c);
 676#endif
 677	}
 678
 679	init_intel_cacheinfo(c);
 680
 681	if (c->cpuid_level > 9) {
 682		unsigned eax = cpuid_eax(10);
 683		/* Check for version and the number of counters */
 684		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
 685			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 686	}
 687
 688	if (cpu_has(c, X86_FEATURE_XMM2))
 689		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 690
 691	if (boot_cpu_has(X86_FEATURE_DS)) {
 692		unsigned int l1, l2;
 693
 694		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
 695		if (!(l1 & (1<<11)))
 696			set_cpu_cap(c, X86_FEATURE_BTS);
 697		if (!(l1 & (1<<12)))
 698			set_cpu_cap(c, X86_FEATURE_PEBS);
 699	}
 700
 701	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
 702	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
 703		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
 704
 705	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
 706		((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
 707		set_cpu_bug(c, X86_BUG_MONITOR);
 708
 709#ifdef CONFIG_X86_64
 710	if (c->x86 == 15)
 711		c->x86_cache_alignment = c->x86_clflush_size * 2;
 712	if (c->x86 == 6)
 713		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 714#else
 715	/*
 716	 * Names for the Pentium II/Celeron processors
 717	 * detectable only by also checking the cache size.
 718	 * Dixon is NOT a Celeron.
 719	 */
 720	if (c->x86 == 6) {
 721		unsigned int l2 = c->x86_cache_size;
 722		char *p = NULL;
 723
 724		switch (c->x86_model) {
 725		case 5:
 726			if (l2 == 0)
 727				p = "Celeron (Covington)";
 728			else if (l2 == 256)
 729				p = "Mobile Pentium II (Dixon)";
 730			break;
 731
 732		case 6:
 733			if (l2 == 128)
 734				p = "Celeron (Mendocino)";
 735			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
 736				p = "Celeron-A";
 737			break;
 738
 739		case 8:
 740			if (l2 == 128)
 741				p = "Celeron (Coppermine)";
 742			break;
 743		}
 744
 745		if (p)
 746			strcpy(c->x86_model_id, p);
 747	}
 748
 749	if (c->x86 == 15)
 750		set_cpu_cap(c, X86_FEATURE_P4);
 751	if (c->x86 == 6)
 752		set_cpu_cap(c, X86_FEATURE_P3);
 753#endif
 754
 755	/* Work around errata */
 756	srat_detect_node(c);
 757
 758	if (cpu_has(c, X86_FEATURE_VMX))
 759		detect_vmx_virtcap(c);
 760
 761	if (cpu_has(c, X86_FEATURE_TME))
 762		detect_tme(c);
 763
 764	init_intel_misc_features(c);
 765
 766	if (tsx_ctrl_state == TSX_CTRL_ENABLE)
 767		tsx_enable();
 768	if (tsx_ctrl_state == TSX_CTRL_DISABLE)
 769		tsx_disable();
 770}
 771
 772#ifdef CONFIG_X86_32
 773static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 774{
 775	/*
 776	 * Intel PIII Tualatin. This comes in two flavours.
 777	 * One has 256kb of cache, the other 512. We have no way
 778	 * to determine which, so we use a boottime override
 779	 * for the 512kb model, and assume 256 otherwise.
 780	 */
 781	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
 782		size = 256;
 783
 784	/*
 785	 * Intel Quark SoC X1000 contains a 4-way set associative
 786	 * 16K cache with a 16 byte cache line and 256 lines per tag
 787	 */
 788	if ((c->x86 == 5) && (c->x86_model == 9))
 789		size = 16;
 790	return size;
 791}
 792#endif
 793
 794#define TLB_INST_4K	0x01
 795#define TLB_INST_4M	0x02
 796#define TLB_INST_2M_4M	0x03
 797
 798#define TLB_INST_ALL	0x05
 799#define TLB_INST_1G	0x06
 800
 801#define TLB_DATA_4K	0x11
 802#define TLB_DATA_4M	0x12
 803#define TLB_DATA_2M_4M	0x13
 804#define TLB_DATA_4K_4M	0x14
 805
 806#define TLB_DATA_1G	0x16
 807
 808#define TLB_DATA0_4K	0x21
 809#define TLB_DATA0_4M	0x22
 810#define TLB_DATA0_2M_4M	0x23
 811
 812#define STLB_4K		0x41
 813#define STLB_4K_2M	0x42
 814
 815static const struct _tlb_table intel_tlb_table[] = {
 816	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
 817	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
 818	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
 819	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
 820	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
 821	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
 822	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
 823	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
 824	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
 825	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
 826	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
 827	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
 828	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
 829	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
 830	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
 831	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
 832	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
 833	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
 834	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
 835	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
 836	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
 837	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
 838	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
 839	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
 840	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
 841	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
 842	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
 843	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
 844	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
 845	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
 846	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
 847	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
 848	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
 849	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
 850	{ 0xc2, TLB_DATA_2M_4M,		16,	" DTLB 2 MByte/4MByte pages, 4-way associative" },
 851	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
 852	{ 0x00, 0, 0 }
 853};
 854
 855static void intel_tlb_lookup(const unsigned char desc)
 856{
 857	unsigned char k;
 858	if (desc == 0)
 859		return;
 860
 861	/* look up this descriptor in the table */
 862	for (k = 0; intel_tlb_table[k].descriptor != desc &&
 863			intel_tlb_table[k].descriptor != 0; k++)
 864		;
 865
 866	if (intel_tlb_table[k].tlb_type == 0)
 867		return;
 868
 869	switch (intel_tlb_table[k].tlb_type) {
 870	case STLB_4K:
 871		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
 872			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
 873		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
 874			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
 875		break;
 876	case STLB_4K_2M:
 877		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
 878			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
 879		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
 880			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
 881		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
 882			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
 883		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
 884			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
 885		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
 886			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
 887		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
 888			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
 889		break;
 890	case TLB_INST_ALL:
 891		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
 892			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
 893		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
 894			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
 895		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
 896			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
 897		break;
 898	case TLB_INST_4K:
 899		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
 900			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
 901		break;
 902	case TLB_INST_4M:
 903		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
 904			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
 905		break;
 906	case TLB_INST_2M_4M:
 907		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
 908			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
 909		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
 910			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
 911		break;
 912	case TLB_DATA_4K:
 913	case TLB_DATA0_4K:
 914		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
 915			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
 916		break;
 917	case TLB_DATA_4M:
 918	case TLB_DATA0_4M:
 919		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
 920			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
 921		break;
 922	case TLB_DATA_2M_4M:
 923	case TLB_DATA0_2M_4M:
 924		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
 925			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
 926		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
 927			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
 928		break;
 929	case TLB_DATA_4K_4M:
 930		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
 931			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
 932		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
 933			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
 934		break;
 935	case TLB_DATA_1G:
 936		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
 937			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
 938		break;
 939	}
 940}
 941
 942static void intel_detect_tlb(struct cpuinfo_x86 *c)
 943{
 944	int i, j, n;
 945	unsigned int regs[4];
 946	unsigned char *desc = (unsigned char *)regs;
 947
 948	if (c->cpuid_level < 2)
 949		return;
 950
 951	/* Number of times to iterate */
 952	n = cpuid_eax(2) & 0xFF;
 953
 954	for (i = 0 ; i < n ; i++) {
 955		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
 956
 957		/* If bit 31 is set, this is an unknown format */
 958		for (j = 0 ; j < 4 ; j++)
 959			if (regs[j] & (1 << 31))
 960				regs[j] = 0;
 961
 962		/* Byte 0 is level count, not a descriptor */
 963		for (j = 1 ; j < 16 ; j++)
 964			intel_tlb_lookup(desc[j]);
 965	}
 966}
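
/*
 * Editorial sketch, not part of the kernel file: the same leaf 2 walk
 * done from userspace with GCC's <cpuid.h>. Each of EAX..EDX packs four
 * one-byte descriptors; the low byte of EAX is the repeat count, and a
 * register with bit 31 set carries no valid descriptors.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;
	int i, j, n;

	__cpuid(2, regs[0], regs[1], regs[2], regs[3]);
	n = regs[0] & 0xff;	/* number of times to repeat the leaf */

	for (i = 0; i < n; i++) {
		__cpuid(2, regs[0], regs[1], regs[2], regs[3]);

		for (j = 0; j < 4; j++)
			if (regs[j] & (1u << 31))
				regs[j] = 0;

		/* byte 0 is the repeat count, not a descriptor */
		for (j = 1; j < 16; j++)
			if (desc[j])
				printf("descriptor 0x%02x\n", desc[j]);
	}
	return 0;
}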
 967
 968static const struct cpu_dev intel_cpu_dev = {
 969	.c_vendor	= "Intel",
 970	.c_ident	= { "GenuineIntel" },
 971#ifdef CONFIG_X86_32
 972	.legacy_models = {
 973		{ .family = 4, .model_names =
 974		  {
 975			  [0] = "486 DX-25/33",
 976			  [1] = "486 DX-50",
 977			  [2] = "486 SX",
 978			  [3] = "486 DX/2",
 979			  [4] = "486 SL",
 980			  [5] = "486 SX/2",
 981			  [7] = "486 DX/2-WB",
 982			  [8] = "486 DX/4",
 983			  [9] = "486 DX/4-WB"
 984		  }
 985		},
 986		{ .family = 5, .model_names =
 987		  {
 988			  [0] = "Pentium 60/66 A-step",
 989			  [1] = "Pentium 60/66",
 990			  [2] = "Pentium 75 - 200",
 991			  [3] = "OverDrive PODP5V83",
 992			  [4] = "Pentium MMX",
 993			  [7] = "Mobile Pentium 75 - 200",
 994			  [8] = "Mobile Pentium MMX",
 995			  [9] = "Quark SoC X1000",
 996		  }
 997		},
 998		{ .family = 6, .model_names =
 999		  {
1000			  [0] = "Pentium Pro A-step",
1001			  [1] = "Pentium Pro",
1002			  [3] = "Pentium II (Klamath)",
1003			  [4] = "Pentium II (Deschutes)",
1004			  [5] = "Pentium II (Deschutes)",
1005			  [6] = "Mobile Pentium II",
1006			  [7] = "Pentium III (Katmai)",
1007			  [8] = "Pentium III (Coppermine)",
1008			  [10] = "Pentium III (Cascades)",
1009			  [11] = "Pentium III (Tualatin)",
1010		  }
1011		},
1012		{ .family = 15, .model_names =
1013		  {
1014			  [0] = "Pentium 4 (Unknown)",
1015			  [1] = "Pentium 4 (Willamette)",
1016			  [2] = "Pentium 4 (Northwood)",
1017			  [4] = "Pentium 4 (Foster)",
1018			  [5] = "Pentium 4 (Foster)",
1019		  }
1020		},
1021	},
1022	.legacy_cache_size = intel_size_cache,
1023#endif
1024	.c_detect_tlb	= intel_detect_tlb,
1025	.c_early_init   = early_init_intel,
1026	.c_init		= init_intel,
1027	.c_x86_vendor	= X86_VENDOR_INTEL,
1028};
1029
1030cpu_dev_register(intel_cpu_dev);
v3.5.6
  1#include <linux/init.h>
  2#include <linux/kernel.h>
  3
  4#include <linux/string.h>
  5#include <linux/bitops.h>
  6#include <linux/smp.h>
  7#include <linux/sched.h>
  8#include <linux/thread_info.h>
  9#include <linux/module.h>
 10#include <linux/uaccess.h>
 11
 12#include <asm/processor.h>
 13#include <asm/pgtable.h>
 14#include <asm/msr.h>
 15#include <asm/bugs.h>
 16#include <asm/cpu.h>
 17
 18#ifdef CONFIG_X86_64
 19#include <linux/topology.h>
 20#include <asm/numa_64.h>
 21#endif
 22
 23#include "cpu.h"
 24
 25#ifdef CONFIG_X86_LOCAL_APIC
 26#include <asm/mpspec.h>
 27#include <asm/apic.h>
 28#endif
 29
 30static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 31{
 32	u64 misc_enable;
 33
 34	/* Unmask CPUID levels if masked: */
 35	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
 36		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
 37
 38		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
 39			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
 40			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
 41			c->cpuid_level = cpuid_eax(0);
 42			get_cpu_cap(c);
 43		}
 44	}
 45
 46	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 47		(c->x86 == 0x6 && c->x86_model >= 0x0e))
 48		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 49
 50	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
 51		unsigned lower_word;
 52
 53		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
 54		/* Required by the SDM */
 55		sync_core();
 56		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
 57	}
 58
 59	/*
 60	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
 61	 *
 62	 * A race condition between speculative fetches and invalidating
 63	 * a large page.  This is worked around in microcode, but we
 64	 * need the microcode to have already been loaded... so if it is
 65	 * not, recommend a BIOS update and disable large pages.
 66	 */
 67	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
 68	    c->microcode < 0x20e) {
 69		printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
 70		clear_cpu_cap(c, X86_FEATURE_PSE);
 71	}
 72
 73#ifdef CONFIG_X86_64
 74	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 75#else
 76	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
 77	if (c->x86 == 15 && c->x86_cache_alignment == 64)
 78		c->x86_cache_alignment = 128;
 79#endif
 80
 81	/* CPUID workaround for 0F33/0F34 CPU */
 82	if (c->x86 == 0xF && c->x86_model == 0x3
 83	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
 84		c->x86_phys_bits = 36;
 85
 86	/*
 87	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
 88	 * with P/T states and does not stop in deep C-states.
 89	 *
 90	 * It is also reliable across cores and sockets. (but not across
 91	 * cabinets - we turn it off in that case explicitly.)
 92	 */
 93	if (c->x86_power & (1 << 8)) {
 94		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 95		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 96		if (!check_tsc_unstable())
 97			sched_clock_stable = 1;
 98	}
 99
100	/*
101	 * There is a known erratum on Pentium III and Core Solo
102	 * and Core Duo CPUs.
103	 * " Page with PAT set to WC while associated MTRR is UC
104	 *   may consolidate to UC "
105	 * Because of this erratum, it is better to stick with
106	 * setting WC in MTRR rather than using PAT on these CPUs.
107	 *
108	 * Enable PAT WC only on P4, Core 2 or later CPUs.
109	 */
110	if (c->x86 == 6 && c->x86_model < 15)
111		clear_cpu_cap(c, X86_FEATURE_PAT);
112
113#ifdef CONFIG_KMEMCHECK
114	/*
115	 * P4s have a "fast strings" feature which causes single-
116	 * stepping REP instructions to only generate a #DB on
117	 * cache-line boundaries.
118	 *
119	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
120	 * (model 2) with the same problem.
121	 */
122	if (c->x86 == 15) {
123		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
124
125		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
126			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
127
128			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
129			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
130		}
131	}
132#endif
133
134	/*
135	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
136	 * clear the fast string and enhanced fast string CPU capabilities.
137	 */
138	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
139		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
140		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
141			printk(KERN_INFO "Disabled fast string operations\n");
142			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
143			setup_clear_cpu_cap(X86_FEATURE_ERMS);
144		}
145	}
146}
147
148#ifdef CONFIG_X86_32
149/*
150 *	Early probe support logic for ppro memory erratum #50
151 *
152 *	This is called before we do cpu ident work
153 */
154
155int __cpuinit ppro_with_ram_bug(void)
156{
157	/* Uses data from early_cpu_detect now */
158	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
159	    boot_cpu_data.x86 == 6 &&
160	    boot_cpu_data.x86_model == 1 &&
161	    boot_cpu_data.x86_mask < 8) {
162		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
163		return 1;
164	}
165	return 0;
166}
167
168#ifdef CONFIG_X86_F00F_BUG
169static void __cpuinit trap_init_f00f_bug(void)
170{
171	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
172
173	/*
174	 * Update the IDT descriptor and reload the IDT so that
175	 * it uses the read-only mapped virtual address.
176	 */
177	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
178	load_idt(&idt_descr);
179}
180#endif
181
182static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
183{
 184	/* Are we being called from identify_secondary_cpu()? */
185	if (!c->cpu_index)
186		return;
187
188	/*
189	 * Mask B, Pentium, but not Pentium MMX
190	 */
191	if (c->x86 == 5 &&
192	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
193	    c->x86_model <= 3) {
194		/*
195		 * Remember we have B step Pentia with bugs
196		 */
 197		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
 198				    "with B stepping processors.\n");
199	}
200}
201
202static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
203{
204	unsigned long lo, hi;
205
206#ifdef CONFIG_X86_F00F_BUG
207	/*
208	 * All current models of Pentium and Pentium with MMX technology CPUs
209	 * have the F0 0F bug, which lets nonprivileged users lock up the
210	 * system.
 211	 * Note that the workaround should only be initialized once...
212	 */
213	c->f00f_bug = 0;
214	if (!paravirt_enabled() && c->x86 == 5) {
215		static int f00f_workaround_enabled;
216
217		c->f00f_bug = 1;
218		if (!f00f_workaround_enabled) {
219			trap_init_f00f_bug();
220			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
221			f00f_workaround_enabled = 1;
222		}
223	}
224#endif
225
226	/*
227	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
228	 * model 3 mask 3
229	 */
230	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
231		clear_cpu_cap(c, X86_FEATURE_SEP);
232
233	/*
 234	 * P4 Xeon erratum 037 workaround.
235	 * Hardware prefetcher may cause stale data to be loaded into the cache.
236	 */
237	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
238		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
239		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
240			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
 241			printk (KERN_INFO "CPU: Disabling hardware prefetching (Erratum 037)\n");
242			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
243			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
244		}
245	}
246
247	/*
248	 * See if we have a good local APIC by checking for buggy Pentia,
249	 * i.e. all B steppings and the C2 stepping of P54C when using their
250	 * integrated APIC (see 11AP erratum in "Pentium Processor
251	 * Specification Update").
252	 */
253	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
254	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
255		set_cpu_cap(c, X86_FEATURE_11AP);
256
257
258#ifdef CONFIG_X86_INTEL_USERCOPY
259	/*
260	 * Set up the preferred alignment for movsl bulk memory moves
261	 */
262	switch (c->x86) {
263	case 4:		/* 486: untested */
264		break;
265	case 5:		/* Old Pentia: untested */
266		break;
267	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
268		movsl_mask.mask = 7;
269		break;
270	case 15:	/* P4 is OK down to 8-byte alignment */
271		movsl_mask.mask = 7;
272		break;
273	}
274#endif
275
276#ifdef CONFIG_X86_NUMAQ
277	numaq_tsc_disable();
278#endif
279
280	intel_smp_check(c);
281}
282#else
283static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
284{
285}
286#endif
287
288static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
289{
290#ifdef CONFIG_NUMA
291	unsigned node;
292	int cpu = smp_processor_id();
293
294	/* Don't do the funky fallback heuristics the AMD version employs
295	   for now. */
296	node = numa_cpu_node(cpu);
297	if (node == NUMA_NO_NODE || !node_online(node)) {
298		/* reuse the value from init_cpu_to_node() */
299		node = cpu_to_node(cpu);
300	}
301	numa_set_node(cpu, node);
302#endif
303}
304
305/*
306 * find out the number of processor cores on the die
307 */
308static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
309{
310	unsigned int eax, ebx, ecx, edx;
311
312	if (c->cpuid_level < 4)
313		return 1;
314
315	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
316	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
317	if (eax & 0x1f)
318		return (eax >> 26) + 1;
319	else
320		return 1;
321}
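
/*
 * Editorial sketch, not part of the kernel file: the same leaf 4
 * computation from userspace. EAX[31:26] of CPUID.(EAX=4, ECX=0) holds
 * the maximum number of addressable core IDs per package minus one,
 * hence the "+ 1"; a zero cache-type field in EAX[4:0] means the leaf
 * reports nothing and the fallback of one core applies.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid_count(4, 0, eax, ebx, ecx, edx);
	if (eax & 0x1f)
		printf("cores per package: %u\n", (eax >> 26) + 1);
	else
		printf("CPUID leaf 4 reports no cache levels\n");
	return 0;
}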
322
323static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
324{
325	/* Intel VMX MSR indicated features */
326#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
327#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
328#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
329#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
330#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
331#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020
332
333	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
334
335	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
336	clear_cpu_cap(c, X86_FEATURE_VNMI);
337	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
338	clear_cpu_cap(c, X86_FEATURE_EPT);
339	clear_cpu_cap(c, X86_FEATURE_VPID);
340
341	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
342	msr_ctl = vmx_msr_high | vmx_msr_low;
343	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
344		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
345	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
346		set_cpu_cap(c, X86_FEATURE_VNMI);
347	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
348		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
349		      vmx_msr_low, vmx_msr_high);
350		msr_ctl2 = vmx_msr_high | vmx_msr_low;
351		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
352		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
353			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
354		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
355			set_cpu_cap(c, X86_FEATURE_EPT);
356		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
357			set_cpu_cap(c, X86_FEATURE_VPID);
358	}
359}
360
361static void __cpuinit init_intel(struct cpuinfo_x86 *c)
362{
363	unsigned int l2 = 0;
364
365	early_init_intel(c);
366
367	intel_workarounds(c);
368
369	/*
370	 * Detect the extended topology information if available. This
371	 * will reinitialise the initial_apicid which will be used
372	 * in init_intel_cacheinfo()
373	 */
374	detect_extended_topology(c);
375
376	l2 = init_intel_cacheinfo(c);
377	if (c->cpuid_level > 9) {
378		unsigned eax = cpuid_eax(10);
379		/* Check for version and the number of counters */
380		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
381			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
382	}
383
384	if (cpu_has_xmm2)
385		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
386	if (cpu_has_ds) {
387		unsigned int l1;
388		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
389		if (!(l1 & (1<<11)))
390			set_cpu_cap(c, X86_FEATURE_BTS);
391		if (!(l1 & (1<<12)))
392			set_cpu_cap(c, X86_FEATURE_PEBS);
393	}
394
395	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
396		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
397
398#ifdef CONFIG_X86_64
399	if (c->x86 == 15)
400		c->x86_cache_alignment = c->x86_clflush_size * 2;
401	if (c->x86 == 6)
402		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
403#else
404	/*
405	 * Names for the Pentium II/Celeron processors
406	 * detectable only by also checking the cache size.
407	 * Dixon is NOT a Celeron.
408	 */
409	if (c->x86 == 6) {
410		char *p = NULL;
411
412		switch (c->x86_model) {
413		case 5:
414			if (l2 == 0)
415				p = "Celeron (Covington)";
416			else if (l2 == 256)
417				p = "Mobile Pentium II (Dixon)";
418			break;
419
420		case 6:
421			if (l2 == 128)
422				p = "Celeron (Mendocino)";
423			else if (c->x86_mask == 0 || c->x86_mask == 5)
424				p = "Celeron-A";
425			break;
426
427		case 8:
428			if (l2 == 128)
429				p = "Celeron (Coppermine)";
430			break;
431		}
432
433		if (p)
434			strcpy(c->x86_model_id, p);
435	}
436
437	if (c->x86 == 15)
438		set_cpu_cap(c, X86_FEATURE_P4);
439	if (c->x86 == 6)
440		set_cpu_cap(c, X86_FEATURE_P3);
441#endif
442
443	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
444		/*
 445		 * let's use the legacy CPUID leaves 0x1 and 0x4 for topology
446		 * detection.
447		 */
448		c->x86_max_cores = intel_num_cpu_cores(c);
449#ifdef CONFIG_X86_32
450		detect_ht(c);
451#endif
452	}
453
454	/* Work around errata */
455	srat_detect_node(c);
456
457	if (cpu_has(c, X86_FEATURE_VMX))
458		detect_vmx_virtcap(c);
459
460	/*
461	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
462	 * x86_energy_perf_policy(8) is available to change it at run-time
463	 */
464	if (cpu_has(c, X86_FEATURE_EPB)) {
465		u64 epb;
466
467		rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
468		if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
469			printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
470				" Set to 'normal', was 'performance'\n"
471				"ENERGY_PERF_BIAS: View and update with"
472				" x86_energy_perf_policy(8)\n");
473			epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
474			wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
475		}
476	}
477}
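
/*
 * Editorial sketch, not part of the kernel file: inspecting the same
 * MSR_IA32_ENERGY_PERF_BIAS (0x1b0) from userspace through the msr
 * driver, the interface x86_energy_perf_policy(8) builds on. Requires
 * root and a loaded msr module ("modprobe msr").
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t epb;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &epb, sizeof(epb), 0x1b0) != sizeof(epb)) {
		perror("MSR_IA32_ENERGY_PERF_BIAS");
		return 1;
	}
	/* 0 = performance, 6 = normal, 15 = powersave */
	printf("EPB = %llu\n", (unsigned long long)(epb & 0xf));
	close(fd);
	return 0;
}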
478
479#ifdef CONFIG_X86_32
480static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
481{
482	/*
483	 * Intel PIII Tualatin. This comes in two flavours.
484	 * One has 256kb of cache, the other 512. We have no way
485	 * to determine which, so we use a boottime override
486	 * for the 512kb model, and assume 256 otherwise.
487	 */
488	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
489		size = 256;
490	return size;
491}
492#endif
493
494static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
495	.c_vendor	= "Intel",
496	.c_ident	= { "GenuineIntel" },
497#ifdef CONFIG_X86_32
498	.c_models = {
499		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
500		  {
501			  [0] = "486 DX-25/33",
502			  [1] = "486 DX-50",
503			  [2] = "486 SX",
504			  [3] = "486 DX/2",
505			  [4] = "486 SL",
506			  [5] = "486 SX/2",
507			  [7] = "486 DX/2-WB",
508			  [8] = "486 DX/4",
509			  [9] = "486 DX/4-WB"
510		  }
511		},
512		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
513		  {
514			  [0] = "Pentium 60/66 A-step",
515			  [1] = "Pentium 60/66",
516			  [2] = "Pentium 75 - 200",
517			  [3] = "OverDrive PODP5V83",
518			  [4] = "Pentium MMX",
519			  [7] = "Mobile Pentium 75 - 200",
520			  [8] = "Mobile Pentium MMX"
521		  }
522		},
523		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
524		  {
525			  [0] = "Pentium Pro A-step",
526			  [1] = "Pentium Pro",
527			  [3] = "Pentium II (Klamath)",
528			  [4] = "Pentium II (Deschutes)",
529			  [5] = "Pentium II (Deschutes)",
530			  [6] = "Mobile Pentium II",
531			  [7] = "Pentium III (Katmai)",
532			  [8] = "Pentium III (Coppermine)",
533			  [10] = "Pentium III (Cascades)",
534			  [11] = "Pentium III (Tualatin)",
535		  }
536		},
537		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
538		  {
539			  [0] = "Pentium 4 (Unknown)",
540			  [1] = "Pentium 4 (Willamette)",
541			  [2] = "Pentium 4 (Northwood)",
542			  [4] = "Pentium 4 (Foster)",
543			  [5] = "Pentium 4 (Foster)",
544		  }
545		},
546	},
547	.c_size_cache	= intel_size_cache,
548#endif
549	.c_early_init   = early_init_intel,
550	.c_init		= init_intel,
551	.c_x86_vendor	= X86_VENDOR_INTEL,
552};
553
554cpu_dev_register(intel_cpu_dev);
555