// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/pgtable.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode_intel.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>
#include <asm/cpu_device_id.h>
#include <asm/cmdline.h>
#include <asm/traps.h>
#include <asm/resctrl.h>
#include <asm/numa.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

enum split_lock_detect_state {
        sld_off = 0,
        sld_warn,
        sld_fatal,
};

/*
 * Default to sld_off because most systems do not support split lock
 * detection. split_lock_setup() will switch this to sld_warn on systems that
 * support split lock detect, unless there is a command line override.
 */
static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
static u64 msr_test_ctrl_cache __ro_after_init;

/*
 * With a name like MSR_TEST_CTL it should go without saying, but don't touch
 * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it
 * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
 */
static bool cpu_model_supports_sld __ro_after_init;

/*
 * Processors which have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own cache. However, there are
 * CPU models in which having conflicting memory types still leads to
 * unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
        switch (c->x86_model) {
        case INTEL_FAM6_CORE_YONAH:
        case INTEL_FAM6_CORE2_MEROM:
        case INTEL_FAM6_CORE2_MEROM_L:
        case INTEL_FAM6_CORE2_PENRYN:
        case INTEL_FAM6_CORE2_DUNNINGTON:
        case INTEL_FAM6_NEHALEM:
        case INTEL_FAM6_NEHALEM_G:
        case INTEL_FAM6_NEHALEM_EP:
        case INTEL_FAM6_NEHALEM_EX:
        case INTEL_FAM6_WESTMERE:
        case INTEL_FAM6_WESTMERE_EP:
        case INTEL_FAM6_SANDYBRIDGE:
                setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
        }
}

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
        ring3mwait_disabled = true;
        return 0;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
        /*
         * Ring 3 MONITOR/MWAIT feature cannot be detected without
         * cpu model and family comparison.
         */
        if (c->x86 != 6)
                return;
        switch (c->x86_model) {
        case INTEL_FAM6_XEON_PHI_KNL:
        case INTEL_FAM6_XEON_PHI_KNM:
                break;
        default:
                return;
        }

        if (ring3mwait_disabled)
                return;

        set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
        this_cpu_or(msr_misc_features_shadow,
                    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

        if (c == &boot_cpu_data)
                ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
        u8 model;
        u8 stepping;
        u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
        { INTEL_FAM6_KABYLAKE,          0x0B,   0x80 },
        { INTEL_FAM6_KABYLAKE,          0x0A,   0x80 },
        { INTEL_FAM6_KABYLAKE,          0x09,   0x80 },
        { INTEL_FAM6_KABYLAKE_L,        0x0A,   0x80 },
        { INTEL_FAM6_KABYLAKE_L,        0x09,   0x80 },
        { INTEL_FAM6_SKYLAKE_X,         0x03,   0x0100013e },
        { INTEL_FAM6_SKYLAKE_X,         0x04,   0x0200003c },
        { INTEL_FAM6_BROADWELL,         0x04,   0x28 },
        { INTEL_FAM6_BROADWELL_G,       0x01,   0x1b },
        { INTEL_FAM6_BROADWELL_D,       0x02,   0x14 },
        { INTEL_FAM6_BROADWELL_D,       0x03,   0x07000011 },
        { INTEL_FAM6_BROADWELL_X,       0x01,   0x0b000025 },
        { INTEL_FAM6_HASWELL_L,         0x01,   0x21 },
        { INTEL_FAM6_HASWELL_G,         0x01,   0x18 },
        { INTEL_FAM6_HASWELL,           0x03,   0x23 },
        { INTEL_FAM6_HASWELL_X,         0x02,   0x3b },
        { INTEL_FAM6_HASWELL_X,         0x04,   0x10 },
        { INTEL_FAM6_IVYBRIDGE_X,       0x04,   0x42a },
        /* Observed in the wild */
        { INTEL_FAM6_SANDYBRIDGE_X,     0x06,   0x61b },
        { INTEL_FAM6_SANDYBRIDGE_X,     0x07,   0x712 },
};

static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
        int i;

        /*
         * We know that hypervisors lie to us about the microcode version, so
         * we may as well hope that they are running the correct version.
         */
        if (cpu_has(c, X86_FEATURE_HYPERVISOR))
                return false;

        if (c->x86 != 6)
                return false;

        for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
                if (c->x86_model == spectre_bad_microcodes[i].model &&
                    c->x86_stepping == spectre_bad_microcodes[i].stepping)
                        return (c->microcode <= spectre_bad_microcodes[i].microcode);
        }
        return false;
}
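
/*
 * Example (illustrative): a KABYLAKE_L part at stepping 0x09 running
 * microcode revision 0x80 matches the { INTEL_FAM6_KABYLAKE_L, 0x09, 0x80 }
 * entry above, and 0x80 <= 0x80, so the revision is treated as bad and
 * early_init_intel() below clears the speculation control feature bits.
 */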

static void early_init_intel(struct cpuinfo_x86 *c)
{
        u64 misc_enable;

        /* Unmask CPUID levels if masked: */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
                                  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
                        c->cpuid_level = cpuid_eax(0);
                        get_cpu_cap(c);
                }
        }

        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

        if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
                c->microcode = intel_get_microcode_revision();

        /* Now if any of them are set, check the blacklist and clear the lot */
        if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
             cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
             cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
             cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
                pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
                setup_clear_cpu_cap(X86_FEATURE_IBRS);
                setup_clear_cpu_cap(X86_FEATURE_IBPB);
                setup_clear_cpu_cap(X86_FEATURE_STIBP);
                setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
                setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
                setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
                setup_clear_cpu_cap(X86_FEATURE_SSBD);
                setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
        }

        /*
         * Atom erratum AAE44/AAF40/AAG38/AAH41:
         *
         * A race condition between speculative fetches and invalidating
         * a large page. This is worked around in microcode, but we
         * need the microcode to have already been loaded... so if it is
         * not, recommend a BIOS update and disable large pages.
         */
        if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
            c->microcode < 0x20e) {
                pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
                clear_cpu_cap(c, X86_FEATURE_PSE);
        }

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
        /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
        if (c->x86 == 15 && c->x86_cache_alignment == 64)
                c->x86_cache_alignment = 128;
#endif

        /* CPUID workaround for 0F33/0F34 CPU */
        if (c->x86 == 0xF && c->x86_model == 0x3
            && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
                c->x86_phys_bits = 36;

        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
         * with P/T states and does not stop in deep C-states.
         *
         * It is also reliable across cores and sockets. (but not across
         * cabinets - we turn it off in that case explicitly.)
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        }

        /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
        if (c->x86 == 6) {
                switch (c->x86_model) {
                case INTEL_FAM6_ATOM_SALTWELL_MID:
                case INTEL_FAM6_ATOM_SALTWELL_TABLET:
                case INTEL_FAM6_ATOM_SILVERMONT_MID:
                case INTEL_FAM6_ATOM_AIRMONT_NP:
                        set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
                        break;
                default:
                        break;
                }
        }

        /*
         * There is a known erratum on Pentium III and Core Solo
         * and Core Duo CPUs.
         * " Page with PAT set to WC while associated MTRR is UC
         * may consolidate to UC "
         * Because of this erratum, it is better to stick with
         * setting WC in MTRR rather than using PAT on these CPUs.
         *
         * Enable PAT WC only on P4, Core 2 or later CPUs.
         */
        if (c->x86 == 6 && c->x86_model < 15)
                clear_cpu_cap(c, X86_FEATURE_PAT);

        /*
         * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
         * clear the fast string and enhanced fast string CPU capabilities.
         */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
                        pr_info("Disabled fast string operations\n");
                        setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
                        setup_clear_cpu_cap(X86_FEATURE_ERMS);
                }
        }

        /*
         * Intel Quark Core DevMan_001.pdf section 6.4.11
         * "The operating system also is required to invalidate (i.e., flush)
         * the TLB when any changes are made to any of the page table entries.
         * The operating system must reload CR3 to cause the TLB to be flushed"
         *
         * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
         * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
         * to be modified.
         */
        if (c->x86 == 5 && c->x86_model == 9) {
                pr_info("Disabling PGE capability bit\n");
                setup_clear_cpu_cap(X86_FEATURE_PGE);
        }

        if (c->cpuid_level >= 0x00000001) {
                u32 eax, ebx, ecx, edx;

                cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
                /*
                 * If HTT (EDX[28]) is set EBX[16:23] contain the number of
                 * apicids which are reserved per package. Store the resulting
                 * shift value for the package management code.
                 */
                if (edx & (1U << 28))
                        c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
        }
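
        /*
         * Worked example (illustrative): with HTT set and EBX[23:16] = 16
         * reserved APIC IDs per package, get_count_order(16) = 4, so
         * x86_coreid_bits = 4 and the package ID is the initial APIC ID
         * shifted right by four bits.
         */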

        check_memory_type_self_snoop_errata(c);

        /*
         * Get the number of SMT siblings early from the extended topology
         * leaf, if available. Otherwise try the legacy SMT detection.
         */
        if (detect_extended_topology_early(c) < 0)
                detect_ht_early(c);
}

static void bsp_init_intel(struct cpuinfo_x86 *c)
{
        resctrl_cpu_detect(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
        /* Uses data from early_cpu_detect now */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_stepping < 8) {
                pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
                return 1;
        }
        return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
        /* Only relevant when called from identify_secondary_cpu() */
        if (!c->cpu_index)
                return;

        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86 == 5 &&
            c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
            c->x86_model <= 3) {
                /*
                 * Remember we have B step Pentia with bugs
                 */
                WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
                             "with B stepping processors.\n");
        }
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
        forcepae = 1;
        return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
        /*
         * All models of Pentium and Pentium with MMX technology CPUs
         * have the F0 0F bug, which lets nonprivileged users lock up the
         * system. Announce that the fault handler will be checking for it.
         * The Quark is also family 5, but does not have the same bug.
         */
        clear_cpu_bug(c, X86_BUG_F00F);
        if (c->x86 == 5 && c->x86_model < 9) {
                static int f00f_workaround_enabled;

                set_cpu_bug(c, X86_BUG_F00F);
                if (!f00f_workaround_enabled) {
                        pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
                        f00f_workaround_enabled = 1;
                }
        }
#endif

        /*
         * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
         * model 3 mask 3
         */
        if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
                clear_cpu_cap(c, X86_FEATURE_SEP);
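
        /*
         * Example (illustrative): an original Pentium Pro (family 6, model 1,
         * stepping 7) packs to (6 << 8 | 1 << 4 | 7) = 0x617 < 0x633, so SEP
         * is cleared even though CPUID advertises it.
         */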

        /*
         * PAE CPUID issue: many Pentium M report no PAE but may have a
         * functionally usable PAE implementation.
         * Forcefully enable PAE if kernel parameter "forcepae" is present.
         */
        if (forcepae) {
                pr_warn("PAE forced!\n");
                set_cpu_cap(c, X86_FEATURE_PAE);
                add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
        }

        /*
         * P4 Xeon erratum 037 workaround.
         * Hardware prefetcher may cause stale data to be loaded into the cache.
         */
        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
                if (msr_set_bit(MSR_IA32_MISC_ENABLE,
                                MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
                        pr_info("CPU: C0 stepping P4 Xeon detected.\n");
                        pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
                }
        }

        /*
         * See if we have a good local APIC by checking for buggy Pentia,
         * i.e. all B steppings and the C2 stepping of P54C when using their
         * integrated APIC (see 11AP erratum in "Pentium Processor
         * Specification Update").
         */
        if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
            (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
                set_cpu_bug(c, X86_BUG_11AP);


#ifdef CONFIG_X86_INTEL_USERCOPY
        /*
         * Set up the preferred alignment for movsl bulk memory moves
         */
        switch (c->x86) {
        case 4:         /* 486: untested */
                break;
        case 5:         /* Old Pentia: untested */
                break;
        case 6:         /* PII/PIII only like movsl with 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        case 15:        /* P4 is OK down to 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        }
#endif

        intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE || !node_online(node)) {
                /* reuse the value from init_cpu_to_node() */
                node = cpu_to_node(cpu);
        }
        numa_set_node(cpu, node);
#endif
}

#define MSR_IA32_TME_ACTIVATE           0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)          (x & 0x1)
#define TME_ACTIVATE_ENABLED(x)         (x & 0x2)

#define TME_ACTIVATE_POLICY(x)          ((x >> 4) & 0xf)        /* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128 0

#define TME_ACTIVATE_KEYID_BITS(x)      ((x >> 32) & 0xf)       /* Bits 35:32 */

#define TME_ACTIVATE_CRYPTO_ALGS(x)     ((x >> 48) & 0xffff)    /* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1

/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED                   0
#define MKTME_DISABLED                  1
#define MKTME_UNINITIALIZED             2
static int mktme_status = MKTME_UNINITIALIZED;

static void detect_tme(struct cpuinfo_x86 *c)
{
        u64 tme_activate, tme_policy, tme_crypto_algs;
        int keyid_bits = 0, nr_keyids = 0;
        static u64 tme_activate_cpu0 = 0;

        rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);

        if (mktme_status != MKTME_UNINITIALIZED) {
                if (tme_activate != tme_activate_cpu0) {
                        /* Broken BIOS? */
                        pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
                        pr_err_once("x86/tme: MKTME is not usable\n");
                        mktme_status = MKTME_DISABLED;

                        /* Proceed. We may need to exclude bits from x86_phys_bits. */
                }
        } else {
                tme_activate_cpu0 = tme_activate;
        }

        if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
                pr_info_once("x86/tme: not enabled by BIOS\n");
                mktme_status = MKTME_DISABLED;
                return;
        }

        if (mktme_status != MKTME_UNINITIALIZED)
                goto detect_keyid_bits;

        pr_info("x86/tme: enabled by BIOS\n");

        tme_policy = TME_ACTIVATE_POLICY(tme_activate);
        if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
                pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);

        tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
        if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
                pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
                       tme_crypto_algs);
                mktme_status = MKTME_DISABLED;
        }
detect_keyid_bits:
        keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
        nr_keyids = (1UL << keyid_bits) - 1;
        if (nr_keyids) {
                pr_info_once("x86/mktme: enabled by BIOS\n");
                pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
        } else {
                pr_info_once("x86/mktme: disabled by BIOS\n");
        }

        if (mktme_status == MKTME_UNINITIALIZED) {
                /* MKTME is usable */
                mktme_status = MKTME_ENABLED;
        }

        /*
         * KeyID bits effectively lower the number of physical address
         * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
         */
        c->x86_phys_bits -= keyid_bits;
}
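
/*
 * Worked example (illustrative MSR value, not taken from any datasheet):
 * tme_activate = 0x0001000600000003 decodes as LOCKED = 1, ENABLED = 1,
 * POLICY = 0 (AES-XTS-128), KEYID_BITS = 6 and CRYPTO_ALGS = 0x0001
 * (AES-XTS-128 supported), giving nr_keyids = (1 << 6) - 1 = 63 and
 * shrinking x86_phys_bits by 6, e.g. from 46 to 40 usable address bits.
 */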

static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
        u64 msr;

        if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
                if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
                        set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
        }
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
        u64 msr;

        if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
                return;

        /* Clear all MISC features */
        this_cpu_write(msr_misc_features_shadow, 0);

        /* Check features and update capabilities and shadow control bits */
        init_cpuid_fault(c);
        probe_xeon_phi_r3mwait(c);

        msr = this_cpu_read(msr_misc_features_shadow);
        wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}

static void split_lock_init(void);

static void init_intel(struct cpuinfo_x86 *c)
{
        early_init_intel(c);

        intel_workarounds(c);

        /*
         * Detect the extended topology information if available. This
         * will reinitialise the initial_apicid which will be used
         * in init_intel_cacheinfo()
         */
        detect_extended_topology(c);

        if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
                /*
                 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
                 * detection.
                 */
                detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
                detect_ht(c);
#endif
        }

        init_intel_cacheinfo(c);

        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        if (cpu_has(c, X86_FEATURE_XMM2))
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

        if (boot_cpu_has(X86_FEATURE_DS)) {
                unsigned int l1, l2;

                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                if (!(l1 & (1<<12)))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }

        if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
            (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
                set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

        if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
            ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
                set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
        /*
         * Names for the Pentium II/Celeron processors
         * detectable only by also checking the cache size.
         * Dixon is NOT a Celeron.
         */
        if (c->x86 == 6) {
                unsigned int l2 = c->x86_cache_size;
                char *p = NULL;

                switch (c->x86_model) {
                case 5:
                        if (l2 == 0)
                                p = "Celeron (Covington)";
                        else if (l2 == 256)
                                p = "Mobile Pentium II (Dixon)";
                        break;

                case 6:
                        if (l2 == 128)
                                p = "Celeron (Mendocino)";
                        else if (c->x86_stepping == 0 || c->x86_stepping == 5)
                                p = "Celeron-A";
                        break;

                case 8:
                        if (l2 == 128)
                                p = "Celeron (Coppermine)";
                        break;
                }

                if (p)
                        strcpy(c->x86_model_id, p);
        }

        if (c->x86 == 15)
                set_cpu_cap(c, X86_FEATURE_P4);
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_P3);
#endif

        /* Work around errata */
        srat_detect_node(c);

        init_ia32_feat_ctl(c);

        if (cpu_has(c, X86_FEATURE_TME))
                detect_tme(c);

        init_intel_misc_features(c);

        if (tsx_ctrl_state == TSX_CTRL_ENABLE)
                tsx_enable();
        if (tsx_ctrl_state == TSX_CTRL_DISABLE)
                tsx_disable();

        split_lock_init();
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /*
         * Intel PIII Tualatin. This comes in two flavours.
         * One has 256kb of cache, the other 512. We have no way
         * to determine which, so we use a boottime override
         * for the 512kb model, and assume 256 otherwise.
         */
        if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
                size = 256;

        /*
         * Intel Quark SoC X1000 contains a 4-way set associative
         * 16K cache with a 16 byte cache line and 256 lines per tag
         */
        if ((c->x86 == 5) && (c->x86_model == 9))
                size = 16;
        return size;
}
#endif

#define TLB_INST_4K     0x01
#define TLB_INST_4M     0x02
#define TLB_INST_2M_4M  0x03

#define TLB_INST_ALL    0x05
#define TLB_INST_1G     0x06

#define TLB_DATA_4K     0x11
#define TLB_DATA_4M     0x12
#define TLB_DATA_2M_4M  0x13
#define TLB_DATA_4K_4M  0x14

#define TLB_DATA_1G     0x16

#define TLB_DATA0_4K    0x21
#define TLB_DATA0_4M    0x22
#define TLB_DATA0_2M_4M 0x23

#define STLB_4K         0x41
#define STLB_4K_2M      0x42

static const struct _tlb_table intel_tlb_table[] = {
        { 0x01, TLB_INST_4K,     32, " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0x02, TLB_INST_4M,      2, " TLB_INST 4 MByte pages, full associative" },
        { 0x03, TLB_DATA_4K,     64, " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0x04, TLB_DATA_4M,      8, " TLB_DATA 4 MByte pages, 4-way set associative" },
        { 0x05, TLB_DATA_4M,     32, " TLB_DATA 4 MByte pages, 4-way set associative" },
        { 0x0b, TLB_INST_4M,      4, " TLB_INST 4 MByte pages, 4-way set associative" },
        { 0x4f, TLB_INST_4K,     32, " TLB_INST 4 KByte pages" },
        { 0x50, TLB_INST_ALL,    64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x51, TLB_INST_ALL,   128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x52, TLB_INST_ALL,   256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x55, TLB_INST_2M_4M,   7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
        { 0x56, TLB_DATA0_4M,    16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
        { 0x57, TLB_DATA0_4K,    16, " TLB_DATA0 4 KByte pages, 4-way associative" },
        { 0x59, TLB_DATA0_4K,    16, " TLB_DATA0 4 KByte pages, fully associative" },
        { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
        { 0x5b, TLB_DATA_4K_4M,  64, " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x61, TLB_INST_4K,     48, " TLB_INST 4 KByte pages, full associative" },
        { 0x63, TLB_DATA_1G,      4, " TLB_DATA 1 GByte pages, 4-way set associative" },
        { 0x6b, TLB_DATA_4K,    256, " TLB_DATA 4 KByte pages, 8-way associative" },
        { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
        { 0x6d, TLB_DATA_1G,     16, " TLB_DATA 1 GByte pages, fully associative" },
        { 0x76, TLB_INST_2M_4M,   8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
        { 0xb0, TLB_INST_4K,    128, " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0xb1, TLB_INST_2M_4M,   4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
        { 0xb2, TLB_INST_4K,     64, " TLB_INST 4KByte pages, 4-way set associative" },
        { 0xb3, TLB_DATA_4K,    128, " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0xb4, TLB_DATA_4K,    256, " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xb5, TLB_INST_4K,     64, " TLB_INST 4 KByte pages, 8-way set associative" },
        { 0xb6, TLB_INST_4K,    128, " TLB_INST 4 KByte pages, 8-way set associative" },
        { 0xba, TLB_DATA_4K,     64, " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xc0, TLB_DATA_4K_4M,   8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
        { 0xc1, STLB_4K_2M,    1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
        { 0xc2, TLB_DATA_2M_4M,  16, " TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
        { 0xca, STLB_4K,        512, " STLB 4 KByte pages, 4-way associative" },
        { 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
        unsigned char k;
        if (desc == 0)
                return;

        /* look up this descriptor in the table */
        for (k = 0; intel_tlb_table[k].descriptor != desc &&
             intel_tlb_table[k].descriptor != 0; k++)
                ;

        if (intel_tlb_table[k].tlb_type == 0)
                return;

        switch (intel_tlb_table[k].tlb_type) {
        case STLB_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case STLB_4K_2M:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_ALL:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4M:
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_2M_4M:
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K:
        case TLB_DATA0_4K:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4M:
        case TLB_DATA0_4M:
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_2M_4M:
        case TLB_DATA0_2M_4M:
                if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K_4M:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_1G:
                if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
                break;
        }
}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
        int i, j, n;
        unsigned int regs[4];
        unsigned char *desc = (unsigned char *)regs;

        if (c->cpuid_level < 2)
                return;

        /* Number of times to iterate */
        n = cpuid_eax(2) & 0xFF;

        for (i = 0 ; i < n ; i++) {
                cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                /* If bit 31 is set, this is an unknown format */
                for (j = 0 ; j < 3 ; j++)
                        if (regs[j] & (1 << 31))
                                regs[j] = 0;

                /* Byte 0 is level count, not a descriptor */
                for (j = 1 ; j < 16 ; j++)
                        intel_tlb_lookup(desc[j]);
        }
}
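
/*
 * Example (illustrative register image): if CPUID leaf 2 returned
 * EAX = 0x665b5001, byte 0 (0x01) would be the iteration count and the
 * remaining descriptor bytes 0x50, 0x5b and 0x66 would be looked up
 * individually: 0x50 records 64 TLB_INST_ALL entries and 0x5b records
 * 64 TLB_DATA_4K_4M entries per the table above, while 0x66 (a cache
 * descriptor) is not in the table and is ignored.
 */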

static const struct cpu_dev intel_cpu_dev = {
        .c_vendor       = "Intel",
        .c_ident        = { "GenuineIntel" },
#ifdef CONFIG_X86_32
        .legacy_models = {
                { .family = 4, .model_names =
                  {
                          [0] = "486 DX-25/33",
                          [1] = "486 DX-50",
                          [2] = "486 SX",
                          [3] = "486 DX/2",
                          [4] = "486 SL",
                          [5] = "486 SX/2",
                          [7] = "486 DX/2-WB",
                          [8] = "486 DX/4",
                          [9] = "486 DX/4-WB"
                  }
                },
                { .family = 5, .model_names =
                  {
                          [0] = "Pentium 60/66 A-step",
                          [1] = "Pentium 60/66",
                          [2] = "Pentium 75 - 200",
                          [3] = "OverDrive PODP5V83",
                          [4] = "Pentium MMX",
                          [7] = "Mobile Pentium 75 - 200",
                          [8] = "Mobile Pentium MMX",
                          [9] = "Quark SoC X1000",
                  }
                },
                { .family = 6, .model_names =
                  {
                          [0] = "Pentium Pro A-step",
                          [1] = "Pentium Pro",
                          [3] = "Pentium II (Klamath)",
                          [4] = "Pentium II (Deschutes)",
                          [5] = "Pentium II (Deschutes)",
                          [6] = "Mobile Pentium II",
                          [7] = "Pentium III (Katmai)",
                          [8] = "Pentium III (Coppermine)",
                          [10] = "Pentium III (Cascades)",
                          [11] = "Pentium III (Tualatin)",
                  }
                },
                { .family = 15, .model_names =
                  {
                          [0] = "Pentium 4 (Unknown)",
                          [1] = "Pentium 4 (Willamette)",
                          [2] = "Pentium 4 (Northwood)",
                          [4] = "Pentium 4 (Foster)",
                          [5] = "Pentium 4 (Foster)",
                  }
                },
        },
        .legacy_cache_size = intel_size_cache,
#endif
        .c_detect_tlb   = intel_detect_tlb,
        .c_early_init   = early_init_intel,
        .c_bsp_init     = bsp_init_intel,
        .c_init         = init_intel,
        .c_x86_vendor   = X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);

#undef pr_fmt
#define pr_fmt(fmt) "x86/split lock detection: " fmt

static const struct {
        const char                      *option;
        enum split_lock_detect_state    state;
} sld_options[] __initconst = {
        { "off",        sld_off   },
        { "warn",       sld_warn  },
        { "fatal",      sld_fatal },
};

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}
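
/*
 * Example: match_option("warn", 4, "warn") is true, while
 * match_option("warning", 7, "warn") is false because the lengths differ,
 * so "split_lock_detect=warning" does not silently match "warn".
 */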

static bool split_lock_verify_msr(bool on)
{
        u64 ctrl, tmp;

        if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
                return false;
        if (on)
                ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
        else
                ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
        if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
                return false;
        rdmsrl(MSR_TEST_CTRL, tmp);
        return ctrl == tmp;
}
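
/*
 * Note the read-back above: a plausible reason (assumption, not stated in
 * this file) is that a write can "succeed" without the bit sticking, e.g.
 * under firmware or a virtualization layer that swallows the write, so only
 * a verified round trip counts as working split lock detection.
 */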

static void __init split_lock_setup(void)
{
        enum split_lock_detect_state state = sld_warn;
        char arg[20];
        int i, ret;

        if (!split_lock_verify_msr(false)) {
                pr_info("MSR access failed: Disabled\n");
                return;
        }

        ret = cmdline_find_option(boot_command_line, "split_lock_detect",
                                  arg, sizeof(arg));
        if (ret >= 0) {
                for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
                        if (match_option(arg, ret, sld_options[i].option)) {
                                state = sld_options[i].state;
                                break;
                        }
                }
        }

        switch (state) {
        case sld_off:
                pr_info("disabled\n");
                return;
        case sld_warn:
                pr_info("warning about user-space split_locks\n");
                break;
        case sld_fatal:
                pr_info("sending SIGBUS on user-space split_locks\n");
                break;
        }

        rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

        if (!split_lock_verify_msr(true)) {
                pr_info("MSR access failed: Disabled\n");
                return;
        }

        sld_state = state;
        setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
}

/*
 * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
 * is not implemented as one thread could undo the setting of the other
 * thread immediately after dropping the lock anyway.
 */
static void sld_update_msr(bool on)
{
        u64 test_ctrl_val = msr_test_ctrl_cache;

        if (on)
                test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

        wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
}

static void split_lock_init(void)
{
        if (cpu_model_supports_sld)
                split_lock_verify_msr(sld_state != sld_off);
}

static void split_lock_warn(unsigned long ip)
{
        pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
                            current->comm, current->pid, ip);

        /*
         * Disable the split lock detection for this task so it can make
         * progress and set TIF_SLD so the detection is re-enabled via
         * switch_to_sld() when the task is scheduled out.
         */
        sld_update_msr(false);
        set_tsk_thread_flag(current, TIF_SLD);
}

bool handle_guest_split_lock(unsigned long ip)
{
        if (sld_state == sld_warn) {
                split_lock_warn(ip);
                return true;
        }

        pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
                     current->comm, current->pid,
                     sld_state == sld_fatal ? "fatal" : "bogus", ip);

        current->thread.error_code = 0;
        current->thread.trap_nr = X86_TRAP_AC;
        force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
        return false;
}
EXPORT_SYMBOL_GPL(handle_guest_split_lock);

bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
        if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
                return false;
        split_lock_warn(regs->ip);
        return true;
}
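
/*
 * Note the X86_EFLAGS_AC test above: if user space enabled alignment
 * checking itself, the #AC is a genuine alignment fault rather than a
 * split lock, so the handler declines it and the caller delivers the
 * fatal signal instead.
 */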

/*
 * This function is called only when switching between tasks with
 * different split-lock detection modes. It sets the MSR for the
 * mode of the new task. This is right most of the time, but since
 * the MSR is shared by hyperthreads on a physical core there can
 * be glitches when the two threads need different modes.
 */
void switch_to_sld(unsigned long tifn)
{
        sld_update_msr(!(tifn & _TIF_SLD));
}

/*
 * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
 * only be trusted if it is confirmed that a CPU model implements a
 * specific feature at a particular bit position.
 *
 * The possible driver data field values:
 *
 * - 0: CPU models that are known to have the per-core split-lock detection
 *      feature even though they do not enumerate IA32_CORE_CAPABILITIES.
 *
 * - 1: CPU models which may enumerate IA32_CORE_CAPABILITIES and if so use
 *      bit 5 to enumerate the per-core split-lock detection feature.
 */
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,           0),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,           0),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,           0),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,        1),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      1),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,      1),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,         1),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,           1),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    1),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           1),
        {}
};

void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
{
        const struct x86_cpu_id *m;
        u64 ia32_core_caps;

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return;

        m = x86_match_cpu(split_lock_cpu_ids);
        if (!m)
                return;

        switch (m->driver_data) {
        case 0:
                break;
        case 1:
                if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
                        return;
                rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
                if (!(ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT))
                        return;
                break;
        default:
                return;
        }

        cpu_model_supports_sld = true;
        split_lock_setup();
}
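
/*
 * Overall flow, as implemented above: cpu_set_core_cap_bits() runs early on
 * the boot CPU, whitelists the model and calls split_lock_setup(), which
 * parses "split_lock_detect=" and force-sets X86_FEATURE_SPLIT_LOCK_DETECT.
 * Later, init_intel() calls split_lock_init() on each CPU to program
 * MSR_TEST_CTRL according to sld_state.
 */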
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/kernel.h>
3#include <linux/pgtable.h>
4
5#include <linux/string.h>
6#include <linux/bitops.h>
7#include <linux/smp.h>
8#include <linux/sched.h>
9#include <linux/sched/clock.h>
10#include <linux/semaphore.h>
11#include <linux/thread_info.h>
12#include <linux/init.h>
13#include <linux/uaccess.h>
14#include <linux/workqueue.h>
15#include <linux/delay.h>
16#include <linux/cpuhotplug.h>
17
18#include <asm/cpufeature.h>
19#include <asm/msr.h>
20#include <asm/bugs.h>
21#include <asm/cpu.h>
22#include <asm/intel-family.h>
23#include <asm/microcode.h>
24#include <asm/hwcap2.h>
25#include <asm/elf.h>
26#include <asm/cpu_device_id.h>
27#include <asm/cmdline.h>
28#include <asm/traps.h>
29#include <asm/resctrl.h>
30#include <asm/numa.h>
31#include <asm/thermal.h>
32
33#ifdef CONFIG_X86_64
34#include <linux/topology.h>
35#endif
36
37#include "cpu.h"
38
39#ifdef CONFIG_X86_LOCAL_APIC
40#include <asm/mpspec.h>
41#include <asm/apic.h>
42#endif
43
44enum split_lock_detect_state {
45 sld_off = 0,
46 sld_warn,
47 sld_fatal,
48 sld_ratelimit,
49};
50
51/*
52 * Default to sld_off because most systems do not support split lock detection.
53 * sld_state_setup() will switch this to sld_warn on systems that support
54 * split lock/bus lock detect, unless there is a command line override.
55 */
56static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
57static u64 msr_test_ctrl_cache __ro_after_init;
58
59/*
60 * With a name like MSR_TEST_CTL it should go without saying, but don't touch
61 * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it
62 * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
63 */
64static bool cpu_model_supports_sld __ro_after_init;
65
66/*
67 * Processors which have self-snooping capability can handle conflicting
68 * memory type across CPUs by snooping its own cache. However, there exists
69 * CPU models in which having conflicting memory types still leads to
70 * unpredictable behavior, machine check errors, or hangs. Clear this
71 * feature to prevent its use on machines with known erratas.
72 */
73static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
74{
75 switch (c->x86_model) {
76 case INTEL_FAM6_CORE_YONAH:
77 case INTEL_FAM6_CORE2_MEROM:
78 case INTEL_FAM6_CORE2_MEROM_L:
79 case INTEL_FAM6_CORE2_PENRYN:
80 case INTEL_FAM6_CORE2_DUNNINGTON:
81 case INTEL_FAM6_NEHALEM:
82 case INTEL_FAM6_NEHALEM_G:
83 case INTEL_FAM6_NEHALEM_EP:
84 case INTEL_FAM6_NEHALEM_EX:
85 case INTEL_FAM6_WESTMERE:
86 case INTEL_FAM6_WESTMERE_EP:
87 case INTEL_FAM6_SANDYBRIDGE:
88 setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
89 }
90}
91
92static bool ring3mwait_disabled __read_mostly;
93
94static int __init ring3mwait_disable(char *__unused)
95{
96 ring3mwait_disabled = true;
97 return 1;
98}
99__setup("ring3mwait=disable", ring3mwait_disable);
100
101static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
102{
103 /*
104 * Ring 3 MONITOR/MWAIT feature cannot be detected without
105 * cpu model and family comparison.
106 */
107 if (c->x86 != 6)
108 return;
109 switch (c->x86_model) {
110 case INTEL_FAM6_XEON_PHI_KNL:
111 case INTEL_FAM6_XEON_PHI_KNM:
112 break;
113 default:
114 return;
115 }
116
117 if (ring3mwait_disabled)
118 return;
119
120 set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
121 this_cpu_or(msr_misc_features_shadow,
122 1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);
123
124 if (c == &boot_cpu_data)
125 ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
126}
127
128/*
129 * Early microcode releases for the Spectre v2 mitigation were broken.
130 * Information taken from;
131 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
132 * - https://kb.vmware.com/s/article/52345
133 * - Microcode revisions observed in the wild
134 * - Release note from 20180108 microcode release
135 */
136struct sku_microcode {
137 u8 model;
138 u8 stepping;
139 u32 microcode;
140};
141static const struct sku_microcode spectre_bad_microcodes[] = {
142 { INTEL_FAM6_KABYLAKE, 0x0B, 0x80 },
143 { INTEL_FAM6_KABYLAKE, 0x0A, 0x80 },
144 { INTEL_FAM6_KABYLAKE, 0x09, 0x80 },
145 { INTEL_FAM6_KABYLAKE_L, 0x0A, 0x80 },
146 { INTEL_FAM6_KABYLAKE_L, 0x09, 0x80 },
147 { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
148 { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
149 { INTEL_FAM6_BROADWELL, 0x04, 0x28 },
150 { INTEL_FAM6_BROADWELL_G, 0x01, 0x1b },
151 { INTEL_FAM6_BROADWELL_D, 0x02, 0x14 },
152 { INTEL_FAM6_BROADWELL_D, 0x03, 0x07000011 },
153 { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
154 { INTEL_FAM6_HASWELL_L, 0x01, 0x21 },
155 { INTEL_FAM6_HASWELL_G, 0x01, 0x18 },
156 { INTEL_FAM6_HASWELL, 0x03, 0x23 },
157 { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
158 { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
159 { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
160 /* Observed in the wild */
161 { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
162 { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
163};
164
165static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
166{
167 int i;
168
169 /*
170 * We know that the hypervisor lie to us on the microcode version so
171 * we may as well hope that it is running the correct version.
172 */
173 if (cpu_has(c, X86_FEATURE_HYPERVISOR))
174 return false;
175
176 if (c->x86 != 6)
177 return false;
178
179 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
180 if (c->x86_model == spectre_bad_microcodes[i].model &&
181 c->x86_stepping == spectre_bad_microcodes[i].stepping)
182 return (c->microcode <= spectre_bad_microcodes[i].microcode);
183 }
184 return false;
185}
186
187#define MSR_IA32_TME_ACTIVATE 0x982
188
189/* Helpers to access TME_ACTIVATE MSR */
190#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
191#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
192
193#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
194#define TME_ACTIVATE_POLICY_AES_XTS_128 0
195
196#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
197
198#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
199#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
200
201/* Values for mktme_status (SW only construct) */
202#define MKTME_ENABLED 0
203#define MKTME_DISABLED 1
204#define MKTME_UNINITIALIZED 2
205static int mktme_status = MKTME_UNINITIALIZED;
206
207static void detect_tme_early(struct cpuinfo_x86 *c)
208{
209 u64 tme_activate, tme_policy, tme_crypto_algs;
210 int keyid_bits = 0, nr_keyids = 0;
211 static u64 tme_activate_cpu0 = 0;
212
213 rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
214
215 if (mktme_status != MKTME_UNINITIALIZED) {
216 if (tme_activate != tme_activate_cpu0) {
217 /* Broken BIOS? */
218 pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
219 pr_err_once("x86/tme: MKTME is not usable\n");
220 mktme_status = MKTME_DISABLED;
221
222 /* Proceed. We may need to exclude bits from x86_phys_bits. */
223 }
224 } else {
225 tme_activate_cpu0 = tme_activate;
226 }
227
228 if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
229 pr_info_once("x86/tme: not enabled by BIOS\n");
230 mktme_status = MKTME_DISABLED;
231 return;
232 }
233
234 if (mktme_status != MKTME_UNINITIALIZED)
235 goto detect_keyid_bits;
236
237 pr_info("x86/tme: enabled by BIOS\n");
238
239 tme_policy = TME_ACTIVATE_POLICY(tme_activate);
240 if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
241 pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
242
243 tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
244 if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
245 pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
246 tme_crypto_algs);
247 mktme_status = MKTME_DISABLED;
248 }
249detect_keyid_bits:
250 keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
251 nr_keyids = (1UL << keyid_bits) - 1;
252 if (nr_keyids) {
253 pr_info_once("x86/mktme: enabled by BIOS\n");
254 pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
255 } else {
256 pr_info_once("x86/mktme: disabled by BIOS\n");
257 }
258
259 if (mktme_status == MKTME_UNINITIALIZED) {
260 /* MKTME is usable */
261 mktme_status = MKTME_ENABLED;
262 }
263
264 /*
265 * KeyID bits effectively lower the number of physical address
266 * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
267 */
268 c->x86_phys_bits -= keyid_bits;
269}
270
271void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
272{
273 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
274 return;
275
276 if (c->x86 < 6 || (c->x86 == 6 && c->x86_model < 0xd))
277 return;
278
279 /*
280 * The BIOS can have limited CPUID to leaf 2, which breaks feature
281 * enumeration. Unlock it and update the maximum leaf info.
282 */
283 if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
284 c->cpuid_level = cpuid_eax(0);
285}
286
287static void early_init_intel(struct cpuinfo_x86 *c)
288{
289 u64 misc_enable;
290
291 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
292 (c->x86 == 0x6 && c->x86_model >= 0x0e))
293 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
294
295 if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
296 c->microcode = intel_get_microcode_revision();
297
298 /* Now if any of them are set, check the blacklist and clear the lot */
299 if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
300 cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
301 cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
302 cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
303 pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
304 setup_clear_cpu_cap(X86_FEATURE_IBRS);
305 setup_clear_cpu_cap(X86_FEATURE_IBPB);
306 setup_clear_cpu_cap(X86_FEATURE_STIBP);
307 setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
308 setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
309 setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
310 setup_clear_cpu_cap(X86_FEATURE_SSBD);
311 setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
312 }
313
314 /*
315 * Atom erratum AAE44/AAF40/AAG38/AAH41:
316 *
317 * A race condition between speculative fetches and invalidating
318 * a large page. This is worked around in microcode, but we
319 * need the microcode to have already been loaded... so if it is
320 * not, recommend a BIOS update and disable large pages.
321 */
322 if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
323 c->microcode < 0x20e) {
324 pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
325 clear_cpu_cap(c, X86_FEATURE_PSE);
326 }
327
328#ifdef CONFIG_X86_64
329 set_cpu_cap(c, X86_FEATURE_SYSENTER32);
330#else
331 /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
332 if (c->x86 == 15 && c->x86_cache_alignment == 64)
333 c->x86_cache_alignment = 128;
334#endif
335
336 /* CPUID workaround for 0F33/0F34 CPU */
337 if (c->x86 == 0xF && c->x86_model == 0x3
338 && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
339 c->x86_phys_bits = 36;
340
341 /*
342 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
343 * with P/T states and does not stop in deep C-states.
344 *
345 * It is also reliable across cores and sockets. (but not across
346 * cabinets - we turn it off in that case explicitly.)
347 */
348 if (c->x86_power & (1 << 8)) {
349 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
350 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
351 }
352
353 /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
354 if (c->x86 == 6) {
355 switch (c->x86_model) {
356 case INTEL_FAM6_ATOM_SALTWELL_MID:
357 case INTEL_FAM6_ATOM_SALTWELL_TABLET:
358 case INTEL_FAM6_ATOM_SILVERMONT_MID:
359 case INTEL_FAM6_ATOM_AIRMONT_NP:
360 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
361 break;
362 default:
363 break;
364 }
365 }
366
367 /*
368 * There is a known erratum on Pentium III and Core Solo
369 * and Core Duo CPUs.
370 * " Page with PAT set to WC while associated MTRR is UC
371 * may consolidate to UC "
372 * Because of this erratum, it is better to stick with
373 * setting WC in MTRR rather than using PAT on these CPUs.
374 *
375 * Enable PAT WC only on P4, Core 2 or later CPUs.
376 */
377 if (c->x86 == 6 && c->x86_model < 15)
378 clear_cpu_cap(c, X86_FEATURE_PAT);
379
380 /*
381 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
382 * clear the fast string and enhanced fast string CPU capabilities.
383 */
384 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
385 rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
386 if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
387 pr_info("Disabled fast string operations\n");
388 setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
389 setup_clear_cpu_cap(X86_FEATURE_ERMS);
390 }
391 }
392
393 /*
394 * Intel Quark Core DevMan_001.pdf section 6.4.11
395 * "The operating system also is required to invalidate (i.e., flush)
396 * the TLB when any changes are made to any of the page table entries.
397 * The operating system must reload CR3 to cause the TLB to be flushed"
398 *
399 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
400 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
401 * to be modified.
402 */
403 if (c->x86 == 5 && c->x86_model == 9) {
404 pr_info("Disabling PGE capability bit\n");
405 setup_clear_cpu_cap(X86_FEATURE_PGE);
406 }
407
408 check_memory_type_self_snoop_errata(c);
409
410 /*
411 * Adjust the number of physical bits early because it affects the
412 * valid bits of the MTRR mask registers.
413 */
414 if (cpu_has(c, X86_FEATURE_TME))
415 detect_tme_early(c);
416}
417
418static void bsp_init_intel(struct cpuinfo_x86 *c)
419{
420 resctrl_cpu_detect(c);
421}
422
423#ifdef CONFIG_X86_32
424/*
425 * Early probe support logic for ppro memory erratum #50
426 *
427 * This is called before we do cpu ident work
428 */
429
430int ppro_with_ram_bug(void)
431{
432 /* Uses data from early_cpu_detect now */
433 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
434 boot_cpu_data.x86 == 6 &&
435 boot_cpu_data.x86_model == 1 &&
436 boot_cpu_data.x86_stepping < 8) {
437 pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
438 return 1;
439 }
440 return 0;
441}
442
443static void intel_smp_check(struct cpuinfo_x86 *c)
444{
445 /* calling is from identify_secondary_cpu() ? */
446 if (!c->cpu_index)
447 return;
448
449 /*
450 * Mask B, Pentium, but not Pentium MMX
451 */
452 if (c->x86 == 5 &&
453 c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
454 c->x86_model <= 3) {
455 /*
456 * Remember we have B step Pentia with bugs
457 */
458 WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
459 "with B stepping processors.\n");
460 }
461}
462
463static int forcepae;
464static int __init forcepae_setup(char *__unused)
465{
466 forcepae = 1;
467 return 1;
468}
469__setup("forcepae", forcepae_setup);
470
471static void intel_workarounds(struct cpuinfo_x86 *c)
472{
473#ifdef CONFIG_X86_F00F_BUG
474 /*
475 * All models of Pentium and Pentium with MMX technology CPUs
476 * have the F0 0F bug, which lets nonprivileged users lock up the
477 * system. Announce that the fault handler will be checking for it.
478 * The Quark is also family 5, but does not have the same bug.
479 */
480 clear_cpu_bug(c, X86_BUG_F00F);
481 if (c->x86 == 5 && c->x86_model < 9) {
482 static int f00f_workaround_enabled;
483
484 set_cpu_bug(c, X86_BUG_F00F);
485 if (!f00f_workaround_enabled) {
486 pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
487 f00f_workaround_enabled = 1;
488 }
489 }
490#endif
491
492 /*
493 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
494 * model 3 mask 3
495 */
496 if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
497 clear_cpu_cap(c, X86_FEATURE_SEP);
498
499 /*
500 * PAE CPUID issue: many Pentium M report no PAE but may have a
501 * functionally usable PAE implementation.
502 * Forcefully enable PAE if kernel parameter "forcepae" is present.
503 */
504 if (forcepae) {
505 pr_warn("PAE forced!\n");
506 set_cpu_cap(c, X86_FEATURE_PAE);
507 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
508 }
509
510 /*
511 * P4 Xeon erratum 037 workaround.
512 * Hardware prefetcher may cause stale data to be loaded into the cache.
513 */
514 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
515 if (msr_set_bit(MSR_IA32_MISC_ENABLE,
516 MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
517 pr_info("CPU: C0 stepping P4 Xeon detected.\n");
518 pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
519 }
520 }
521
522 /*
523 * See if we have a good local APIC by checking for buggy Pentia,
524 * i.e. all B steppings and the C2 stepping of P54C when using their
525 * integrated APIC (see 11AP erratum in "Pentium Processor
526 * Specification Update").
527 */
528 if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
529 (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
530 set_cpu_bug(c, X86_BUG_11AP);
531
532
533#ifdef CONFIG_X86_INTEL_USERCOPY
534 /*
535 * Set up the preferred alignment for movsl bulk memory moves
536 */
537 switch (c->x86) {
538 case 4: /* 486: untested */
539 break;
540 case 5: /* Old Pentia: untested */
541 break;
542 case 6: /* PII/PIII only like movsl with 8-byte alignment */
543 movsl_mask.mask = 7;
544 break;
545 case 15: /* P4 is OK down to 8-byte alignment */
546 movsl_mask.mask = 7;
547 break;
548 }
549#endif
550
551 intel_smp_check(c);
552}
553#else
554static void intel_workarounds(struct cpuinfo_x86 *c)
555{
556}
557#endif
558
559static void srat_detect_node(struct cpuinfo_x86 *c)
560{
561#ifdef CONFIG_NUMA
562 unsigned node;
563 int cpu = smp_processor_id();
564
565 /* Don't do the funky fallback heuristics the AMD version employs
566 for now. */
567 node = numa_cpu_node(cpu);
568 if (node == NUMA_NO_NODE || !node_online(node)) {
569 /* reuse the value from init_cpu_to_node() */
570 node = cpu_to_node(cpu);
571 }
572 numa_set_node(cpu, node);
573#endif
574}
575
576static void init_cpuid_fault(struct cpuinfo_x86 *c)
577{
578 u64 msr;
579
580 if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
581 if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
582 set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
583 }
584}
585
586static void init_intel_misc_features(struct cpuinfo_x86 *c)
587{
588 u64 msr;
589
590 if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
591 return;
592
593 /* Clear all MISC features */
594 this_cpu_write(msr_misc_features_shadow, 0);
595
596 /* Check features and update capabilities and shadow control bits */
597 init_cpuid_fault(c);
598 probe_xeon_phi_r3mwait(c);
599
600 msr = this_cpu_read(msr_misc_features_shadow);
601 wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
602}
603
604static void split_lock_init(void);
605static void bus_lock_init(void);
606
607static void init_intel(struct cpuinfo_x86 *c)
608{
609 early_init_intel(c);
610
611 intel_workarounds(c);
612
613 init_intel_cacheinfo(c);
614
615 if (c->cpuid_level > 9) {
616 unsigned eax = cpuid_eax(10);
617 /* Check for version and the number of counters */
618 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
619 set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
620 }

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
	    c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	init_intel_misc_features(c);

	split_lock_init();
	bus_lock_init();

	intel_init_thermal(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K		0x01
#define TLB_INST_4M		0x02
#define TLB_INST_2M_4M		0x03

#define TLB_INST_ALL		0x05
#define TLB_INST_1G		0x06

#define TLB_DATA_4K		0x11
#define TLB_DATA_4M		0x12
#define TLB_DATA_2M_4M		0x13
#define TLB_DATA_4K_4M		0x14

#define TLB_DATA_1G		0x16

#define TLB_DATA0_4K		0x21
#define TLB_DATA0_4M		0x22
#define TLB_DATA0_2M_4M		0x23

#define STLB_4K			0x41
#define STLB_4K_2M		0x42

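/*
 * These are internal type codes, not raw CPUID descriptors: the high
 * nibble roughly groups a code by which TLB it describes (0x0x
 * instruction, 0x1x data, 0x2x the DATA0 first-level data TLB, 0x4x the
 * shared second-level TLB) and the low nibble distinguishes the page
 * sizes covered.
 */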
static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
	     intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

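/*
 * CPUID leaf 0x2 packs one-byte TLB/cache descriptors into EAX..EDX.
 * The low byte of EAX is the number of times the leaf must be executed
 * to collect all descriptors, a register with bit 31 set holds no valid
 * descriptors, and a 0x00 byte is simply an unused slot.
 */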
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, the register holds no valid descriptors */
		for (j = 0 ; j < 4 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_bsp_init	= bsp_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);

#undef pr_fmt
#define pr_fmt(fmt) "x86/split lock detection: " fmt

static const struct {
	const char			*option;
	enum split_lock_detect_state	state;
} sld_options[] __initconst = {
	{ "off",	sld_off	  },
	{ "warn",	sld_warn  },
	{ "fatal",	sld_fatal },
	{ "ratelimit:", sld_ratelimit },
};
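
/*
 * Matching kernel command line examples ("ratelimit:" additionally
 * expects a numeric suffix, parsed in match_option() below):
 *
 *	split_lock_detect=off
 *	split_lock_detect=warn
 *	split_lock_detect=fatal
 *	split_lock_detect=ratelimit:100
 */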

static struct ratelimit_state bld_ratelimit;

static unsigned int sysctl_sld_mitigate = 1;
static DEFINE_SEMAPHORE(buslock_sem, 1);

#ifdef CONFIG_PROC_SYSCTL
static struct ctl_table sld_sysctls[] = {
	{
		.procname	= "split_lock_mitigate",
		.data		= &sysctl_sld_mitigate,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init sld_mitigate_sysctl_init(void)
{
	register_sysctl_init("kernel", sld_sysctls);
	return 0;
}

late_initcall(sld_mitigate_sysctl_init);
#endif
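
/*
 * The knob registered above is visible as
 * /proc/sys/kernel/split_lock_mitigate. For example,
 * "sysctl kernel.split_lock_mitigate=0" keeps the warning but skips the
 * sleep and the one-core-at-a-time serialization in split_lock_warn().
 */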

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt), ratelimit;

	if (strncmp(arg, opt, len))
		return false;

	/*
	 * Min ratelimit is 1 bus lock/sec.
	 * Max ratelimit is 1000 bus locks/sec.
	 */
	if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 &&
	    ratelimit > 0 && ratelimit <= 1000) {
		ratelimit_state_init(&bld_ratelimit, HZ, ratelimit);
		ratelimit_set_flags(&bld_ratelimit, RATELIMIT_MSG_ON_RELEASE);
		return true;
	}

	return len == arglen;
}

static bool split_lock_verify_msr(bool on)
{
	u64 ctrl, tmp;

	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
		return false;
	if (on)
		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	else
		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
		return false;
	rdmsrl(MSR_TEST_CTRL, tmp);
	return ctrl == tmp;
}
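
/*
 * Enumeration alone is deliberately not trusted: the write-then-read-back
 * pair above also catches environments, e.g. a hypervisor that discards
 * writes to MSR_TEST_CTRL, where the MSR is readable but the control bit
 * does not actually stick.
 */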

static void __init sld_state_setup(void)
{
	enum split_lock_detect_state state = sld_warn;
	char arg[20];
	int i, ret;

	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
	    !boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
		return;

	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
				  arg, sizeof(arg));
	if (ret >= 0) {
		for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
			if (match_option(arg, ret, sld_options[i].option)) {
				state = sld_options[i].state;
				break;
			}
		}
	}
	sld_state = state;
}

static void __init __split_lock_setup(void)
{
	if (!split_lock_verify_msr(false)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	if (!split_lock_verify_msr(true)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	/* Restore the MSR to its cached value. */
	wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
}

/*
 * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
 * is not implemented as one thread could undo the setting of the other
 * thread immediately after dropping the lock anyway.
 */
static void sld_update_msr(bool on)
{
	u64 test_ctrl_val = msr_test_ctrl_cache;

	if (on)
		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
}

static void split_lock_init(void)
{
	/*
	 * The ratelimit case is handled by #DB for bus lock; keep #AC for
	 * split lock disabled.
	 */
	if (sld_state == sld_ratelimit) {
		split_lock_verify_msr(false);
		return;
	}

	if (cpu_model_supports_sld)
		split_lock_verify_msr(sld_state != sld_off);
}

static void __split_lock_reenable_unlock(struct work_struct *work)
{
	sld_update_msr(true);
	up(&buslock_sem);
}

static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock);

static void __split_lock_reenable(struct work_struct *work)
{
	sld_update_msr(true);
}
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);

/*
 * If a CPU goes offline with pending delayed work to re-enable split lock
 * detection then the delayed work will be executed on some other CPU. That
 * handles releasing the buslock_sem, but because it executes on a
 * different CPU probably won't re-enable split lock detection. This is a
 * problem on HT systems since the sibling CPU on the same core may then be
 * left running with split lock detection disabled.
 *
 * Unconditionally re-enable detection here.
 */
static int splitlock_cpu_offline(unsigned int cpu)
{
	sld_update_msr(true);

	return 0;
}

static void split_lock_warn(unsigned long ip)
{
	struct delayed_work *work;
	int cpu;

	if (!current->reported_split_lock)
		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, ip);
	current->reported_split_lock = 1;

	if (sysctl_sld_mitigate) {
		/*
		 * Misery factor #1:
		 * sleep 10ms before trying to execute split lock.
		 */
		if (msleep_interruptible(10) > 0)
			return;
		/*
		 * Misery factor #2:
		 * only allow one core at a time to run with split lock
		 * detection disabled.
		 */
		if (down_interruptible(&buslock_sem) == -EINTR)
			return;
		work = &sl_reenable_unlock;
	} else {
		work = &sl_reenable;
	}

	cpu = get_cpu();
	schedule_delayed_work_on(cpu, work, 2);

	/* Disable split lock detection on this CPU to make progress */
	sld_update_msr(false);
	put_cpu();
}
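
/*
 * The delayed work scheduled above re-enables split lock detection on
 * this CPU after roughly two jiffies, a window in which the faulting
 * access can complete without immediately trapping again.
 */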

bool handle_guest_split_lock(unsigned long ip)
{
	if (sld_state == sld_warn) {
		split_lock_warn(ip);
		return true;
	}

	pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
		     current->comm, current->pid,
		     sld_state == sld_fatal ? "fatal" : "bogus", ip);

	current->thread.error_code = 0;
	current->thread.trap_nr = X86_TRAP_AC;
	force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
	return false;
}
EXPORT_SYMBOL_GPL(handle_guest_split_lock);

static void bus_lock_init(void)
{
	u64 val;

	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
		return;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, val);

	if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
	    (sld_state == sld_warn || sld_state == sld_fatal)) ||
	    sld_state == sld_off) {
		/*
		 * Warn and fatal are handled by #AC for split lock if #AC for
		 * split lock is supported.
		 */
		val &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
	} else {
		val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
	}

	wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
}

bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
	if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
		return false;
	split_lock_warn(regs->ip);
	return true;
}

void handle_bus_lock(struct pt_regs *regs)
{
	switch (sld_state) {
	case sld_off:
		break;
	case sld_ratelimit:
		/* Enforce no more than bld_ratelimit bus locks/sec. */
		while (!__ratelimit(&bld_ratelimit))
			msleep(20);
		/* Warn on the bus lock. */
		fallthrough;
	case sld_warn:
		pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, regs->ip);
		break;
	case sld_fatal:
		force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
		break;
	}
}

/*
 * CPU models that are known to have the per-core split-lock detection
 * feature even though they do not enumerate IA32_CORE_CAPABILITIES.
 */
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,	0),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,	0),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,	0),
	{}
};

static void __init split_lock_setup(struct cpuinfo_x86 *c)
{
	const struct x86_cpu_id *m;
	u64 ia32_core_caps;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	/* Check for CPUs that have support but do not enumerate it: */
	m = x86_match_cpu(split_lock_cpu_ids);
	if (m)
		goto supported;

	if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
		return;

	/*
	 * Not all bits in MSR_IA32_CORE_CAPS are architectural, but
	 * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is. All CPUs that set
	 * it have split lock detection.
	 */
	rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
	if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
		goto supported;

	/* CPU is not in the model list and does not have the MSR bit: */
	return;

supported:
	cpu_model_supports_sld = true;
	__split_lock_setup();
}

static void sld_state_show(void)
{
	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
	    !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
		return;

	switch (sld_state) {
	case sld_off:
		pr_info("disabled\n");
		break;
	case sld_warn:
		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
			pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
			if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					      "x86/splitlock", NULL, splitlock_cpu_offline) < 0)
				pr_warn("No splitlock CPU offline handler\n");
		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
			pr_info("#DB: warning on user-space bus_locks\n");
		}
		break;
	case sld_fatal:
		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
			pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n");
		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
			pr_info("#DB: sending SIGBUS on user-space bus_locks%s\n",
				boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) ?
				" from non-WB" : "");
		}
		break;
	case sld_ratelimit:
		if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
			pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst);
		break;
	}
}

void __init sld_setup(struct cpuinfo_x86 *c)
{
	split_lock_setup(c);
	sld_state_setup();
	sld_state_show();
}

#define X86_HYBRID_CPU_TYPE_ID_SHIFT	24

/**
 * get_this_hybrid_cpu_type() - Get the type of this hybrid CPU
 *
 * Returns the CPU type [31:24] (i.e., Atom or Core) of a CPU in
 * a hybrid processor. If the processor is not hybrid, returns 0.
 */
u8 get_this_hybrid_cpu_type(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		return 0;

	return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT;
}
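
/*
 * CPUID.1AH:EAX[31:24] is the core type of the native model ID; on
 * current hybrid parts 0x20 identifies an Atom (efficiency) core and
 * 0x40 a Core (performance) core, which is how callers such as the
 * hybrid perf code tell the two PMU flavours apart.
 */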