v3.5.6
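Intel CPU detection and setup code, arch/x86/kernel/cpu/intel.c: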
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#include <asm/numa_64.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* Required by the SDM */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}
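
	/*
	 * Note: the microcode revision read above follows the SDM procedure:
	 * write 0 to IA32_UCODE_REV, execute a serializing instruction
	 * (sync_core() issues CPUID), then read the MSR back; the revision
	 * is returned in the high half (EDX), which lands in c->microcode.
	 */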

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			sched_clock_stable = 1;
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			printk(KERN_INFO "Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}
}

#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do cpu ident work
 */

int __cpuinit ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif

static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Is this call from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
				    "with B stepping processors.\n");
	}
}

static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Erratum 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);


#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
#else
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}
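
/*
 * Example: CPUID leaf 4 reports the maximum number of addressable core
 * IDs per physical package, minus 1, in EAX[31:26]; a quad-core die
 * typically reports 3 there, so (eax >> 26) + 1 == 4. EAX[4:0] == 0
 * means the leaf is invalid and a single core is assumed.
 */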

static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}
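
/*
 * Note: FLEXPRIORITY above is a derived flag rather than a single MSR
 * bit: it is set only when both TPR shadowing (primary controls) and
 * "virtualize APIC accesses" (secondary controls) are available.
 */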

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	if (cpu_has_ds) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);

	/*
	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
	 * x86_energy_perf_policy(8) is available to change it at run-time
	 */
	if (cpu_has(c, X86_FEATURE_EPB)) {
		u64 epb;

		rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
		if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
			printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
				" Set to 'normal', was 'performance'\n"
				"ENERGY_PERF_BIAS: View and update with"
				" x86_energy_perf_policy(8)\n");
			epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
			wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
		}
	}
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_size_cache	= intel_size_cache,
#endif
	.c_early_init   = early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);

v6.8
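The same file as of v6.8, now also covering Spectre v2 microcode blacklisting, TME/MKTME detection, split lock and bus lock detection, and CPUID leaf 2 TLB parsing: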
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/pgtable.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/semaphore.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/cpuhotplug.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>
#include <asm/cpu_device_id.h>
#include <asm/cmdline.h>
#include <asm/traps.h>
#include <asm/resctrl.h>
#include <asm/numa.h>
#include <asm/thermal.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

enum split_lock_detect_state {
	sld_off = 0,
	sld_warn,
	sld_fatal,
	sld_ratelimit,
};

/*
 * Default to sld_off because most systems do not support split lock detection.
 * sld_state_setup() will switch this to sld_warn on systems that support
 * split lock/bus lock detect, unless there is a command line override.
 */
static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
static u64 msr_test_ctrl_cache __ro_after_init;

/*
 * With a name like MSR_TEST_CTL it should go without saying, but don't touch
 * MSR_TEST_CTL unless the CPU is one of the whitelisted models.  Writing it
 * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
 */
static bool cpu_model_supports_sld __ro_after_init;

/*
 * Processors which have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own caches. However, there
 * exist CPU models in which having conflicting memory types still leads to
 * unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_model) {
	case INTEL_FAM6_CORE_YONAH:
	case INTEL_FAM6_CORE2_MEROM:
	case INTEL_FAM6_CORE2_MEROM_L:
	case INTEL_FAM6_CORE2_PENRYN:
	case INTEL_FAM6_CORE2_DUNNINGTON:
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);
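
/*
 * Usage: booting with "ring3mwait=disable" on the kernel command line
 * keeps ring 3 MONITOR/MWAIT disabled even on the Xeon Phi models that
 * would otherwise have X86_FEATURE_RING3MWAIT set below.
 */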

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * Ring 3 MONITOR/MWAIT feature cannot be detected without
	 * cpu model and family comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_model) {
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
	u8 model;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_FAM6_KABYLAKE,		0x0B,	0x80 },
	{ INTEL_FAM6_KABYLAKE,		0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE,		0x09,	0x80 },
	{ INTEL_FAM6_KABYLAKE_L,	0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE_L,	0x09,	0x80 },
	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
	{ INTEL_FAM6_BROADWELL,		0x04,	0x28 },
	{ INTEL_FAM6_BROADWELL_G,	0x01,	0x1b },
	{ INTEL_FAM6_BROADWELL_D,	0x02,	0x14 },
	{ INTEL_FAM6_BROADWELL_D,	0x03,	0x07000011 },
	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_FAM6_HASWELL_L,		0x01,	0x21 },
	{ INTEL_FAM6_HASWELL_G,		0x01,	0x18 },
	{ INTEL_FAM6_HASWELL,		0x03,	0x23 },
	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
};

static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie to us about the microcode version,
	 * so we may as well hope that they are running the correct version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	if (c->x86 != 6)
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_model == spectre_bad_microcodes[i].model &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}
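
/*
 * Example: a KABYLAKE_L part at stepping 0x09 whose loaded microcode
 * revision is 0x80 or lower matches the table above, and the Spectre v2
 * related feature bits are then cleared in early_init_intel() below.
 */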

#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128	0

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */

#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1

/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED			0
#define MKTME_DISABLED			1
#define MKTME_UNINITIALIZED		2
static int mktme_status = MKTME_UNINITIALIZED;

static void detect_tme_early(struct cpuinfo_x86 *c)
{
	u64 tme_activate, tme_policy, tme_crypto_algs;
	int keyid_bits = 0, nr_keyids = 0;
	static u64 tme_activate_cpu0 = 0;

	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (mktme_status != MKTME_UNINITIALIZED) {
		if (tme_activate != tme_activate_cpu0) {
			/* Broken BIOS? */
			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
			pr_err_once("x86/tme: MKTME is not usable\n");
			mktme_status = MKTME_DISABLED;

			/* Proceed. We may need to exclude bits from x86_phys_bits. */
		}
	} else {
		tme_activate_cpu0 = tme_activate;
	}

	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		mktme_status = MKTME_DISABLED;
		return;
	}

	if (mktme_status != MKTME_UNINITIALIZED)
		goto detect_keyid_bits;

	pr_info("x86/tme: enabled by BIOS\n");

	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);

	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
				tme_crypto_algs);
		mktme_status = MKTME_DISABLED;
	}
detect_keyid_bits:
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	nr_keyids = (1UL << keyid_bits) - 1;
	if (nr_keyids) {
		pr_info_once("x86/mktme: enabled by BIOS\n");
		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
	} else {
		pr_info_once("x86/mktme: disabled by BIOS\n");
	}

	if (mktme_status == MKTME_UNINITIALIZED) {
		/* MKTME is usable */
		mktme_status = MKTME_ENABLED;
	}

	/*
	 * KeyID bits effectively lower the number of physical address
	 * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
	 */
	c->x86_phys_bits -= keyid_bits;
}
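
/*
 * Worked example: with TME_ACTIVATE reporting 6 KeyID bits, 2^6 - 1 = 63
 * MKTME KeyIDs are available (KeyID 0 is used by TME itself), and
 * x86_phys_bits drops by 6, e.g. from 46 to 40 bits of usable physical
 * address space.
 */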

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case INTEL_FAM6_ATOM_SALTWELL_MID:
		case INTEL_FAM6_ATOM_SALTWELL_TABLET:
		case INTEL_FAM6_ATOM_SILVERMONT_MID:
		case INTEL_FAM6_ATOM_AIRMONT_NP:
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	check_memory_type_self_snoop_errata(c);

	/*
	 * Get the number of SMT siblings early from the extended topology
	 * leaf, if available. Otherwise try the legacy SMT detection.
	 */
	if (detect_extended_topology_early(c) < 0)
		detect_ht_early(c);

	/*
	 * Adjust the number of physical bits early because it affects the
	 * valid bits of the MTRR mask registers.
	 */
	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme_early(c);
}

static void bsp_init_intel(struct cpuinfo_x86 *c)
{
	resctrl_cpu_detect(c);
}

#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Is this call from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
				    "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);
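
/*
 * Usage: booting with "forcepae" on the kernel command line re-enables
 * the PAE feature bit for Pentium M parts that implement PAE without
 * advertising it; the kernel is tainted to record the override.
 */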

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);


#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}

static void split_lock_init(void);
static void bus_lock_init(void);

static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	init_intel_cacheinfo(c);

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
		((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	init_intel_misc_features(c);

	split_lock_init();
	bus_lock_init();

	intel_init_thermal(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;
	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
	     intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}
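
/*
 * Example: CPUID leaf 2 packs one-byte descriptors into EAX..EDX.
 * Descriptor 0xc1 above denotes a shared second-level TLB covering
 * 4 KByte and 2 MByte pages with 1024 entries, so the STLB_4K_2M case
 * raises both the instruction and data 4K/2M entry counts to 1024.
 */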

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init   = early_init_intel,
	.c_bsp_init	= bsp_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);

#undef pr_fmt
#define pr_fmt(fmt) "x86/split lock detection: " fmt

static const struct {
	const char			*option;
	enum split_lock_detect_state	state;
} sld_options[] __initconst = {
	{ "off",	sld_off   },
	{ "warn",	sld_warn  },
	{ "fatal",	sld_fatal },
	{ "ratelimit:", sld_ratelimit },
};

static struct ratelimit_state bld_ratelimit;

static unsigned int sysctl_sld_mitigate = 1;
static DEFINE_SEMAPHORE(buslock_sem, 1);

#ifdef CONFIG_PROC_SYSCTL
static struct ctl_table sld_sysctls[] = {
	{
		.procname       = "split_lock_mitigate",
		.data           = &sysctl_sld_mitigate,
		.maxlen         = sizeof(unsigned int),
		.mode           = 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1         = SYSCTL_ZERO,
		.extra2         = SYSCTL_ONE,
	},
};

static int __init sld_mitigate_sysctl_init(void)
{
	register_sysctl_init("kernel", sld_sysctls);
	return 0;
}

late_initcall(sld_mitigate_sysctl_init);
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt), ratelimit;

	if (strncmp(arg, opt, len))
		return false;

	/*
	 * Min ratelimit is 1 bus lock/sec.
	 * Max ratelimit is 1000 bus locks/sec.
	 */
	if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 &&
	    ratelimit > 0 && ratelimit <= 1000) {
		ratelimit_state_init(&bld_ratelimit, HZ, ratelimit);
		ratelimit_set_flags(&bld_ratelimit, RATELIMIT_MSG_ON_RELEASE);
		return true;
	}

	return len == arglen;
}
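
/*
 * Example: "split_lock_detect=ratelimit:5" on the kernel command line
 * matches the "ratelimit:" prefix above and initializes bld_ratelimit
 * to at most 5 bus locks per second, while "warn", "fatal" and "off"
 * must match exactly.
 */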

static bool split_lock_verify_msr(bool on)
{
	u64 ctrl, tmp;

	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
		return false;
	if (on)
		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	else
		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
		return false;
	rdmsrl(MSR_TEST_CTRL, tmp);
	return ctrl == tmp;
}

static void __init sld_state_setup(void)
{
	enum split_lock_detect_state state = sld_warn;
	char arg[20];
	int i, ret;

	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
	    !boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
		return;

	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
				  arg, sizeof(arg));
	if (ret >= 0) {
		for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
			if (match_option(arg, ret, sld_options[i].option)) {
				state = sld_options[i].state;
				break;
			}
		}
	}
	sld_state = state;
}

static void __init __split_lock_setup(void)
{
	if (!split_lock_verify_msr(false)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	if (!split_lock_verify_msr(true)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	/* Restore the MSR to its cached value. */
	wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
}

/*
 * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
 * is not implemented as one thread could undo the setting of the other
 * thread immediately after dropping the lock anyway.
 */
static void sld_update_msr(bool on)
{
	u64 test_ctrl_val = msr_test_ctrl_cache;

	if (on)
		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
}

static void split_lock_init(void)
{
	/*
	 * #DB for bus lock handles ratelimit and #AC for split lock is
	 * disabled.
	 */
	if (sld_state == sld_ratelimit) {
		split_lock_verify_msr(false);
		return;
	}

	if (cpu_model_supports_sld)
		split_lock_verify_msr(sld_state != sld_off);
}

static void __split_lock_reenable_unlock(struct work_struct *work)
{
	sld_update_msr(true);
	up(&buslock_sem);
}

static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock);

static void __split_lock_reenable(struct work_struct *work)
{
	sld_update_msr(true);
}
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);

/*
 * If a CPU goes offline with pending delayed work to re-enable split lock
 * detection then the delayed work will be executed on some other CPU. That
 * handles releasing the buslock_sem, but because it executes on a
 * different CPU probably won't re-enable split lock detection. This is a
 * problem on HT systems since the sibling CPU on the same core may then be
 * left running with split lock detection disabled.
 *
 * Unconditionally re-enable detection here.
 */
static int splitlock_cpu_offline(unsigned int cpu)
{
	sld_update_msr(true);

	return 0;
}

static void split_lock_warn(unsigned long ip)
{
	struct delayed_work *work;
	int cpu;

	if (!current->reported_split_lock)
		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, ip);
	current->reported_split_lock = 1;

	if (sysctl_sld_mitigate) {
		/*
		 * misery factor #1:
		 * sleep 10ms before trying to execute split lock.
		 */
		if (msleep_interruptible(10) > 0)
			return;
		/*
		 * Misery factor #2:
		 * only allow one buslocked disabled core at a time.
		 */
		if (down_interruptible(&buslock_sem) == -EINTR)
			return;
		work = &sl_reenable_unlock;
	} else {
		work = &sl_reenable;
	}

	cpu = get_cpu();
	schedule_delayed_work_on(cpu, work, 2);

	/* Disable split lock detection on this CPU to make progress */
	sld_update_msr(false);
	put_cpu();
}

bool handle_guest_split_lock(unsigned long ip)
{
	if (sld_state == sld_warn) {
		split_lock_warn(ip);
		return true;
	}

	pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
		     current->comm, current->pid,
		     sld_state == sld_fatal ? "fatal" : "bogus", ip);

	current->thread.error_code = 0;
	current->thread.trap_nr = X86_TRAP_AC;
	force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
	return false;
}
EXPORT_SYMBOL_GPL(handle_guest_split_lock);

static void bus_lock_init(void)
{
	u64 val;

	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
		return;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, val);

	if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
	    (sld_state == sld_warn || sld_state == sld_fatal)) ||
	    sld_state == sld_off) {
		/*
		 * Warn and fatal are handled by #AC for split lock if #AC for
		 * split lock is supported.
		 */
		val &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
	} else {
		val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
	}

	wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
}

bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
	if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
		return false;
	split_lock_warn(regs->ip);
	return true;
}

void handle_bus_lock(struct pt_regs *regs)
{
	switch (sld_state) {
	case sld_off:
		break;
	case sld_ratelimit:
		/* Enforce no more than bld_ratelimit bus locks/sec. */
		while (!__ratelimit(&bld_ratelimit))
			msleep(20);
		/* Warn on the bus lock. */
		fallthrough;
	case sld_warn:
		pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, regs->ip);
		break;
	case sld_fatal:
		force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
		break;
	}
}

/*
 * CPU models that are known to have the per-core split-lock detection
 * feature even though they do not enumerate IA32_CORE_CAPABILITIES.
 */
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,	0),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,	0),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,	0),
	{}
};

static void __init split_lock_setup(struct cpuinfo_x86 *c)
{
	const struct x86_cpu_id *m;
	u64 ia32_core_caps;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	/* Check for CPUs that have support but do not enumerate it: */
	m = x86_match_cpu(split_lock_cpu_ids);
	if (m)
		goto supported;

	if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
		return;

	/*
	 * Not all bits in MSR_IA32_CORE_CAPS are architectural, but
	 * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is.  All CPUs that set
	 * it have split lock detection.
	 */
	rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
	if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
		goto supported;

	/* CPU is not in the model list and does not have the MSR bit: */
	return;

supported:
	cpu_model_supports_sld = true;
	__split_lock_setup();
}

static void sld_state_show(void)
{
	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
	    !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
		return;

	switch (sld_state) {
	case sld_off:
		pr_info("disabled\n");
		break;
	case sld_warn:
		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
			pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
			if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					      "x86/splitlock", NULL, splitlock_cpu_offline) < 0)
				pr_warn("No splitlock CPU offline handler\n");
		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
			pr_info("#DB: warning on user-space bus_locks\n");
		}
		break;
	case sld_fatal:
		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
			pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n");
		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
			pr_info("#DB: sending SIGBUS on user-space bus_locks%s\n",
				boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) ?
				" from non-WB" : "");
		}
		break;
	case sld_ratelimit:
		if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
			pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst);
		break;
	}
}

void __init sld_setup(struct cpuinfo_x86 *c)
{
	split_lock_setup(c);
	sld_state_setup();
	sld_state_show();
}

#define X86_HYBRID_CPU_TYPE_ID_SHIFT	24

/**
 * get_this_hybrid_cpu_type() - Get the type of this hybrid CPU
 *
 * Returns the CPU type [31:24] (i.e., Atom or Core) of a CPU in
 * a hybrid processor. If the processor is not hybrid, returns 0.
 */
u8 get_this_hybrid_cpu_type(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		return 0;

	return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT;
}
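
/*
 * Example (core-type values per Intel's SDM, stated here as an
 * assumption): on hybrid parts, CPUID.1Ah EAX[31:24] is 0x20 for an
 * Atom (E-core) and 0x40 for a Core (P-core); on non-hybrid parts this
 * helper returns 0.
 */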