// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/pgtable.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>
#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>
#include <asm/numa.h>
#include <asm/thermal.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

/*
 * Processors which have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own cache. However, there
 * exist CPU models in which having conflicting memory types still leads
 * to unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_vfm) {
	case INTEL_CORE_YONAH:
	case INTEL_CORE2_MEROM:
	case INTEL_CORE2_MEROM_L:
	case INTEL_CORE2_PENRYN:
	case INTEL_CORE2_DUNNINGTON:
	case INTEL_NEHALEM:
	case INTEL_NEHALEM_G:
	case INTEL_NEHALEM_EP:
	case INTEL_NEHALEM_EX:
	case INTEL_WESTMERE:
	case INTEL_WESTMERE_EP:
	case INTEL_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}

static bool ring3mwait_disabled __read_mostly;

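/*
 * "ring3mwait=disable" on the kernel command line forces the ring 3
 * MONITOR/MWAIT feature off even on Xeon Phi parts that support it.
 */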
static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * The ring 3 MONITOR/MWAIT feature cannot be detected without
	 * comparing the CPU family and model.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_vfm) {
	case INTEL_XEON_PHI_KNL:
	case INTEL_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
	u32 vfm;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_KABYLAKE,	0x0B,	0x80 },
	{ INTEL_KABYLAKE,	0x0A,	0x80 },
	{ INTEL_KABYLAKE,	0x09,	0x80 },
	{ INTEL_KABYLAKE_L,	0x0A,	0x80 },
	{ INTEL_KABYLAKE_L,	0x09,	0x80 },
	{ INTEL_SKYLAKE_X,	0x03,	0x0100013e },
	{ INTEL_SKYLAKE_X,	0x04,	0x0200003c },
	{ INTEL_BROADWELL,	0x04,	0x28 },
	{ INTEL_BROADWELL_G,	0x01,	0x1b },
	{ INTEL_BROADWELL_D,	0x02,	0x14 },
	{ INTEL_BROADWELL_D,	0x03,	0x07000011 },
	{ INTEL_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_HASWELL_L,	0x01,	0x21 },
	{ INTEL_HASWELL_G,	0x01,	0x18 },
	{ INTEL_HASWELL,	0x03,	0x23 },
	{ INTEL_HASWELL_X,	0x02,	0x3b },
	{ INTEL_HASWELL_X,	0x04,	0x10 },
	{ INTEL_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_SANDYBRIDGE_X,	0x07,	0x712 },
};

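/*
 * A CPU is considered affected if its model/stepping matches a table
 * entry and the loaded microcode revision is at or below the entry's
 * revision.
 */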
static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie to us about the microcode version,
	 * so we may as well hope that they are running the correct version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_vfm == spectre_bad_microcodes[i].vfm &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}

#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */
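/*
 * TME_ACTIVATE layout (only the subset used here): bit 0 is the lock
 * bit, bit 1 the hardware encryption enable, and bits 35:32 give the
 * number of physical address bits repurposed as KeyID bits.
 */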

static void detect_tme_early(struct cpuinfo_x86 *c)
{
	u64 tme_activate;
	int keyid_bits;

	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		clear_cpu_cap(c, X86_FEATURE_TME);
		return;
	}
	pr_info_once("x86/tme: enabled by BIOS\n");
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	if (!keyid_bits)
		return;

	/*
	 * KeyID bits are set by BIOS and can be present regardless
	 * of whether the kernel is using them. They effectively lower
	 * the number of physical address bits.
	 *
	 * Update cpuinfo_x86::x86_phys_bits accordingly. For example,
	 * with 6 KeyID bits a CPU that enumerates 46 physical address
	 * bits is left with 40 usable ones.
	 */
	c->x86_phys_bits -= keyid_bits;
	pr_info_once("x86/mktme: BIOS enabled: x86_phys_bits reduced by %d\n",
		     keyid_bits);
}

void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	if (c->x86 < 6 || (c->x86 == 6 && c->x86_model < 0xd))
		return;

	/*
	 * The BIOS may have limited CPUID to leaf 2, which breaks feature
	 * enumeration. Unlock it and update the maximum leaf info.
	 */
	if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
		c->cpuid_level = cpuid_eax(0);
}

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86_vfm == INTEL_ATOM_BONNELL && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3 &&
	    (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Penwell and Cloverview have a TSC which doesn't sleep on S3 */
	switch (c->x86_vfm) {
	case INTEL_ATOM_SALTWELL_MID:
	case INTEL_ATOM_SALTWELL_TABLET:
	case INTEL_ATOM_SILVERMONT_MID:
	case INTEL_ATOM_AIRMONT_NP:
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
		break;
	}

	/*
	 * PAT is broken on early family 6 CPUs, the last of which
	 * is "Yonah" where the erratum is named "AN7":
	 *
	 *	Page with PAT (Page Attribute Table) Set to USWC
	 *	(Uncacheable Speculative Write Combine) While
	 *	Associated MTRR (Memory Type Range Register) Is UC
	 *	(Uncacheable) May Consolidate to UC
	 *
	 * Disable PAT and fall back to MTRR on these CPUs.
	 */
	if (c->x86_vfm >= INTEL_PENTIUM_PRO &&
	    c->x86_vfm <= INTEL_CORE_YONAH)
		clear_cpu_cap(c, X86_FEATURE_PAT);

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86_vfm == INTEL_QUARK_X1000) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	check_memory_type_self_snoop_errata(c);

	/*
	 * Adjust the number of physical bits early because it affects the
	 * valid bits of the MTRR mask registers.
	 */
	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme_early(c);
}

static void bsp_init_intel(struct cpuinfo_x86 *c)
{
	resctrl_cpu_detect(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Erratum #50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Called from identify_secondary_cpu()? Skip the boot CPU. */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

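/*
 * "forcepae" on the kernel command line overrides the missing PAE
 * CPUID bit on Pentium M; see the PAE note in intel_workarounds().
 */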
static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update"). 0x520 encodes family 5, model 2.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version
	 * employs for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}
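/*
 * CPUID faulting raises a #GP when CPUID is executed at CPL > 0; the
 * kernel uses it to implement arch_prctl(ARCH_SET_CPUID) for user space.
 */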
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}

static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);

	init_intel_cacheinfo(c);

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_vfm == INTEL_CORE2_DUNNINGTON ||
	     c->x86_vfm == INTEL_NEHALEM_EX ||
	     c->x86_vfm == INTEL_WESTMERE_EX))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (boot_cpu_has(X86_FEATURE_MWAIT) &&
	    (c->x86_vfm == INTEL_ATOM_GOLDMONT ||
	     c->x86_vfm == INTEL_LUNARLAKE_M))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	init_intel_misc_features(c);

	split_lock_init();

	intel_init_thermal(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K		0x01
#define TLB_INST_4M		0x02
#define TLB_INST_2M_4M		0x03

#define TLB_INST_ALL		0x05
#define TLB_INST_1G		0x06

#define TLB_DATA_4K		0x11
#define TLB_DATA_4M		0x12
#define TLB_DATA_2M_4M		0x13
#define TLB_DATA_4K_4M		0x14

#define TLB_DATA_1G		0x16
#define TLB_DATA_1G_2M_4M	0x17

#define TLB_DATA0_4K		0x21
#define TLB_DATA0_4M		0x22
#define TLB_DATA0_2M_4M		0x23

#define STLB_4K			0x41
#define STLB_4K_2M		0x42

/*
 * Each of leaf 0x2's one-byte TLB descriptors implies the same number of
 * entries for all the TLB types it describes. The 0x63 descriptor is an
 * exception: it implies 4 dTLB entries for 1GB pages and 32 dTLB entries
 * for 2MB or 4MB pages. Encode descriptor 0x63's dTLB entry count for
 * 2MB/4MB pages here, as its count for 1GB pages is already encoded in
 * the intel_tlb_table[] mapping.
 */
#define TLB_0x63_2M_4M_ENTRIES	32

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G_2M_4M,	4,	" TLB_DATA 1 GByte pages, 4-way set associative"
						" (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here)" },
	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
	     intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G_2M_4M:
		if (tlb_lld_2m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
			tlb_lld_2m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
		if (tlb_lld_4m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES)
			tlb_lld_4m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES;
		fallthrough;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

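/*
 * CPUID leaf 0x2 packs up to 15 one-byte descriptors into EAX..EDX:
 * the low byte of EAX is the number of times the leaf must be queried,
 * and a set bit 31 marks a register whose contents are not descriptors.
 */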
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 4 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init   = early_init_intel,
	.c_bsp_init	= bsp_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);

#define X86_HYBRID_CPU_TYPE_ID_SHIFT	24
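/*
 * CPUID leaf 0x1a EAX layout on hybrid parts: bits 31:24 hold the core
 * type (e.g. 0x20 for Atom, 0x40 for Core) and bits 23:0 the native
 * model ID of the microarchitecture.
 */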

/**
 * get_this_hybrid_cpu_type() - Get the type of this hybrid CPU
 *
 * Returns the CPU type [31:24] (i.e., Atom or Core) of a CPU in
 * a hybrid processor. If the processor is not hybrid, returns 0.
 */
u8 get_this_hybrid_cpu_type(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		return 0;

	return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT;
}
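/*
 * Illustrative use only (not from this file): a caller distinguishing
 * core types on a hybrid part might do
 *
 *	if (get_this_hybrid_cpu_type() == 0x40)
 *		... P-core path ...
 *	else
 *		... E-core (or non-hybrid) path ...
 *
 * The perf hybrid PMU code is the main in-kernel user of this helper.
 */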

/**
 * get_this_hybrid_cpu_native_id() - Get the native id of this hybrid CPU
 *
 * Returns the uarch native ID [23:0] of a CPU in a hybrid processor.
 * If the processor is not hybrid, returns 0.
 */
u32 get_this_hybrid_cpu_native_id(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		return 0;

	return cpuid_eax(0x0000001a) &
	       (BIT_ULL(X86_HYBRID_CPU_TYPE_ID_SHIFT) - 1);
}
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/kernel.h>
3#include <linux/pgtable.h>
4
5#include <linux/string.h>
6#include <linux/bitops.h>
7#include <linux/smp.h>
8#include <linux/sched.h>
9#include <linux/sched/clock.h>
10#include <linux/thread_info.h>
11#include <linux/init.h>
12#include <linux/uaccess.h>
13
14#include <asm/cpufeature.h>
15#include <asm/msr.h>
16#include <asm/bugs.h>
17#include <asm/cpu.h>
18#include <asm/intel-family.h>
19#include <asm/microcode_intel.h>
20#include <asm/hwcap2.h>
21#include <asm/elf.h>
22#include <asm/cpu_device_id.h>
23#include <asm/cmdline.h>
24#include <asm/traps.h>
25#include <asm/resctrl.h>
26#include <asm/numa.h>
27
28#ifdef CONFIG_X86_64
29#include <linux/topology.h>
30#endif
31
32#include "cpu.h"
33
34#ifdef CONFIG_X86_LOCAL_APIC
35#include <asm/mpspec.h>
36#include <asm/apic.h>
37#endif
38
39enum split_lock_detect_state {
40 sld_off = 0,
41 sld_warn,
42 sld_fatal,
43};
44
45/*
46 * Default to sld_off because most systems do not support split lock detection
47 * split_lock_setup() will switch this to sld_warn on systems that support
48 * split lock detect, unless there is a command line override.
49 */
50static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
51static u64 msr_test_ctrl_cache __ro_after_init;
52
53/*
54 * With a name like MSR_TEST_CTL it should go without saying, but don't touch
55 * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it
56 * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
57 */
58static bool cpu_model_supports_sld __ro_after_init;
59
60/*
61 * Processors which have self-snooping capability can handle conflicting
62 * memory type across CPUs by snooping its own cache. However, there exists
63 * CPU models in which having conflicting memory types still leads to
64 * unpredictable behavior, machine check errors, or hangs. Clear this
65 * feature to prevent its use on machines with known erratas.
66 */
67static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
68{
69 switch (c->x86_model) {
70 case INTEL_FAM6_CORE_YONAH:
71 case INTEL_FAM6_CORE2_MEROM:
72 case INTEL_FAM6_CORE2_MEROM_L:
73 case INTEL_FAM6_CORE2_PENRYN:
74 case INTEL_FAM6_CORE2_DUNNINGTON:
75 case INTEL_FAM6_NEHALEM:
76 case INTEL_FAM6_NEHALEM_G:
77 case INTEL_FAM6_NEHALEM_EP:
78 case INTEL_FAM6_NEHALEM_EX:
79 case INTEL_FAM6_WESTMERE:
80 case INTEL_FAM6_WESTMERE_EP:
81 case INTEL_FAM6_SANDYBRIDGE:
82 setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
83 }
84}
85
86static bool ring3mwait_disabled __read_mostly;
87
88static int __init ring3mwait_disable(char *__unused)
89{
90 ring3mwait_disabled = true;
91 return 0;
92}
93__setup("ring3mwait=disable", ring3mwait_disable);
94
95static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
96{
97 /*
98 * Ring 3 MONITOR/MWAIT feature cannot be detected without
99 * cpu model and family comparison.
100 */
101 if (c->x86 != 6)
102 return;
103 switch (c->x86_model) {
104 case INTEL_FAM6_XEON_PHI_KNL:
105 case INTEL_FAM6_XEON_PHI_KNM:
106 break;
107 default:
108 return;
109 }
110
111 if (ring3mwait_disabled)
112 return;
113
114 set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
115 this_cpu_or(msr_misc_features_shadow,
116 1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);
117
118 if (c == &boot_cpu_data)
119 ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
120}
121
122/*
123 * Early microcode releases for the Spectre v2 mitigation were broken.
124 * Information taken from;
125 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
126 * - https://kb.vmware.com/s/article/52345
127 * - Microcode revisions observed in the wild
128 * - Release note from 20180108 microcode release
129 */
130struct sku_microcode {
131 u8 model;
132 u8 stepping;
133 u32 microcode;
134};
135static const struct sku_microcode spectre_bad_microcodes[] = {
136 { INTEL_FAM6_KABYLAKE, 0x0B, 0x80 },
137 { INTEL_FAM6_KABYLAKE, 0x0A, 0x80 },
138 { INTEL_FAM6_KABYLAKE, 0x09, 0x80 },
139 { INTEL_FAM6_KABYLAKE_L, 0x0A, 0x80 },
140 { INTEL_FAM6_KABYLAKE_L, 0x09, 0x80 },
141 { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
142 { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
143 { INTEL_FAM6_BROADWELL, 0x04, 0x28 },
144 { INTEL_FAM6_BROADWELL_G, 0x01, 0x1b },
145 { INTEL_FAM6_BROADWELL_D, 0x02, 0x14 },
146 { INTEL_FAM6_BROADWELL_D, 0x03, 0x07000011 },
147 { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
148 { INTEL_FAM6_HASWELL_L, 0x01, 0x21 },
149 { INTEL_FAM6_HASWELL_G, 0x01, 0x18 },
150 { INTEL_FAM6_HASWELL, 0x03, 0x23 },
151 { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
152 { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
153 { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
154 /* Observed in the wild */
155 { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
156 { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
157};
158
159static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
160{
161 int i;
162
163 /*
164 * We know that the hypervisor lie to us on the microcode version so
165 * we may as well hope that it is running the correct version.
166 */
167 if (cpu_has(c, X86_FEATURE_HYPERVISOR))
168 return false;
169
170 if (c->x86 != 6)
171 return false;
172
173 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
174 if (c->x86_model == spectre_bad_microcodes[i].model &&
175 c->x86_stepping == spectre_bad_microcodes[i].stepping)
176 return (c->microcode <= spectre_bad_microcodes[i].microcode);
177 }
178 return false;
179}
180
181static void early_init_intel(struct cpuinfo_x86 *c)
182{
183 u64 misc_enable;
184
185 /* Unmask CPUID levels if masked: */
186 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
187 if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
188 MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
189 c->cpuid_level = cpuid_eax(0);
190 get_cpu_cap(c);
191 }
192 }
193
194 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
195 (c->x86 == 0x6 && c->x86_model >= 0x0e))
196 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
197
198 if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
199 c->microcode = intel_get_microcode_revision();
200
201 /* Now if any of them are set, check the blacklist and clear the lot */
202 if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
203 cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
204 cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
205 cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
206 pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
207 setup_clear_cpu_cap(X86_FEATURE_IBRS);
208 setup_clear_cpu_cap(X86_FEATURE_IBPB);
209 setup_clear_cpu_cap(X86_FEATURE_STIBP);
210 setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
211 setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
212 setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
213 setup_clear_cpu_cap(X86_FEATURE_SSBD);
214 setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
215 }
216
217 /*
218 * Atom erratum AAE44/AAF40/AAG38/AAH41:
219 *
220 * A race condition between speculative fetches and invalidating
221 * a large page. This is worked around in microcode, but we
222 * need the microcode to have already been loaded... so if it is
223 * not, recommend a BIOS update and disable large pages.
224 */
225 if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
226 c->microcode < 0x20e) {
227 pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
228 clear_cpu_cap(c, X86_FEATURE_PSE);
229 }
230
231#ifdef CONFIG_X86_64
232 set_cpu_cap(c, X86_FEATURE_SYSENTER32);
233#else
234 /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
235 if (c->x86 == 15 && c->x86_cache_alignment == 64)
236 c->x86_cache_alignment = 128;
237#endif
238
239 /* CPUID workaround for 0F33/0F34 CPU */
240 if (c->x86 == 0xF && c->x86_model == 0x3
241 && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
242 c->x86_phys_bits = 36;
243
244 /*
245 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
246 * with P/T states and does not stop in deep C-states.
247 *
248 * It is also reliable across cores and sockets. (but not across
249 * cabinets - we turn it off in that case explicitly.)
250 */
251 if (c->x86_power & (1 << 8)) {
252 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
253 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
254 }
255
256 /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
257 if (c->x86 == 6) {
258 switch (c->x86_model) {
259 case INTEL_FAM6_ATOM_SALTWELL_MID:
260 case INTEL_FAM6_ATOM_SALTWELL_TABLET:
261 case INTEL_FAM6_ATOM_SILVERMONT_MID:
262 case INTEL_FAM6_ATOM_AIRMONT_NP:
263 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
264 break;
265 default:
266 break;
267 }
268 }
269
270 /*
271 * There is a known erratum on Pentium III and Core Solo
272 * and Core Duo CPUs.
273 * " Page with PAT set to WC while associated MTRR is UC
274 * may consolidate to UC "
275 * Because of this erratum, it is better to stick with
276 * setting WC in MTRR rather than using PAT on these CPUs.
277 *
278 * Enable PAT WC only on P4, Core 2 or later CPUs.
279 */
280 if (c->x86 == 6 && c->x86_model < 15)
281 clear_cpu_cap(c, X86_FEATURE_PAT);
282
283 /*
284 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
285 * clear the fast string and enhanced fast string CPU capabilities.
286 */
287 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
288 rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
289 if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
290 pr_info("Disabled fast string operations\n");
291 setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
292 setup_clear_cpu_cap(X86_FEATURE_ERMS);
293 }
294 }
295
296 /*
297 * Intel Quark Core DevMan_001.pdf section 6.4.11
298 * "The operating system also is required to invalidate (i.e., flush)
299 * the TLB when any changes are made to any of the page table entries.
300 * The operating system must reload CR3 to cause the TLB to be flushed"
301 *
302 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
303 * should be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
304 * to be modified.
305 */
306 if (c->x86 == 5 && c->x86_model == 9) {
307 pr_info("Disabling PGE capability bit\n");
308 setup_clear_cpu_cap(X86_FEATURE_PGE);
309 }
310
311 if (c->cpuid_level >= 0x00000001) {
312 u32 eax, ebx, ecx, edx;
313
314 cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
315 /*
316 * If HTT (EDX[28]) is set EBX[16:23] contain the number of
317 * apicids which are reserved per package. Store the resulting
318 * shift value for the package management code.
319 */
320 if (edx & (1U << 28))
321 c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
322 }
323
324 check_memory_type_self_snoop_errata(c);
325
326 /*
327 * Get the number of SMT siblings early from the extended topology
328 * leaf, if available. Otherwise try the legacy SMT detection.
329 */
330 if (detect_extended_topology_early(c) < 0)
331 detect_ht_early(c);
332}
333
334static void bsp_init_intel(struct cpuinfo_x86 *c)
335{
336 resctrl_cpu_detect(c);
337}
338
339#ifdef CONFIG_X86_32
340/*
341 * Early probe support logic for ppro memory erratum #50
342 *
343 * This is called before we do cpu ident work
344 */
345
346int ppro_with_ram_bug(void)
347{
348 /* Uses data from early_cpu_detect now */
349 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
350 boot_cpu_data.x86 == 6 &&
351 boot_cpu_data.x86_model == 1 &&
352 boot_cpu_data.x86_stepping < 8) {
353 pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
354 return 1;
355 }
356 return 0;
357}
358
359static void intel_smp_check(struct cpuinfo_x86 *c)
360{
361 /* calling is from identify_secondary_cpu() ? */
362 if (!c->cpu_index)
363 return;
364
365 /*
366 * Mask B, Pentium, but not Pentium MMX
367 */
368 if (c->x86 == 5 &&
369 c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
370 c->x86_model <= 3) {
371 /*
372 * Remember we have B step Pentia with bugs
373 */
374 WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
375 "with B stepping processors.\n");
376 }
377}
378
379static int forcepae;
380static int __init forcepae_setup(char *__unused)
381{
382 forcepae = 1;
383 return 1;
384}
385__setup("forcepae", forcepae_setup);
386
387static void intel_workarounds(struct cpuinfo_x86 *c)
388{
389#ifdef CONFIG_X86_F00F_BUG
390 /*
391 * All models of Pentium and Pentium with MMX technology CPUs
392 * have the F0 0F bug, which lets nonprivileged users lock up the
393 * system. Announce that the fault handler will be checking for it.
394 * The Quark is also family 5, but does not have the same bug.
395 */
396 clear_cpu_bug(c, X86_BUG_F00F);
397 if (c->x86 == 5 && c->x86_model < 9) {
398 static int f00f_workaround_enabled;
399
400 set_cpu_bug(c, X86_BUG_F00F);
401 if (!f00f_workaround_enabled) {
402 pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
403 f00f_workaround_enabled = 1;
404 }
405 }
406#endif
407
408 /*
409 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
410 * model 3 mask 3
411 */
412 if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
413 clear_cpu_cap(c, X86_FEATURE_SEP);
414
415 /*
416 * PAE CPUID issue: many Pentium M report no PAE but may have a
417 * functionally usable PAE implementation.
418 * Forcefully enable PAE if kernel parameter "forcepae" is present.
419 */
420 if (forcepae) {
421 pr_warn("PAE forced!\n");
422 set_cpu_cap(c, X86_FEATURE_PAE);
423 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
424 }
425
426 /*
427 * P4 Xeon erratum 037 workaround.
428 * Hardware prefetcher may cause stale data to be loaded into the cache.
429 */
430 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
431 if (msr_set_bit(MSR_IA32_MISC_ENABLE,
432 MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
433 pr_info("CPU: C0 stepping P4 Xeon detected.\n");
434 pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
435 }
436 }
437
438 /*
439 * See if we have a good local APIC by checking for buggy Pentia,
440 * i.e. all B steppings and the C2 stepping of P54C when using their
441 * integrated APIC (see 11AP erratum in "Pentium Processor
442 * Specification Update").
443 */
444 if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
445 (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
446 set_cpu_bug(c, X86_BUG_11AP);
447
448
449#ifdef CONFIG_X86_INTEL_USERCOPY
450 /*
451 * Set up the preferred alignment for movsl bulk memory moves
452 */
453 switch (c->x86) {
454 case 4: /* 486: untested */
455 break;
456 case 5: /* Old Pentia: untested */
457 break;
458 case 6: /* PII/PIII only like movsl with 8-byte alignment */
459 movsl_mask.mask = 7;
460 break;
461 case 15: /* P4 is OK down to 8-byte alignment */
462 movsl_mask.mask = 7;
463 break;
464 }
465#endif
466
467 intel_smp_check(c);
468}
469#else
470static void intel_workarounds(struct cpuinfo_x86 *c)
471{
472}
473#endif
474
475static void srat_detect_node(struct cpuinfo_x86 *c)
476{
477#ifdef CONFIG_NUMA
478 unsigned node;
479 int cpu = smp_processor_id();
480
481 /* Don't do the funky fallback heuristics the AMD version employs
482 for now. */
483 node = numa_cpu_node(cpu);
484 if (node == NUMA_NO_NODE || !node_online(node)) {
485 /* reuse the value from init_cpu_to_node() */
486 node = cpu_to_node(cpu);
487 }
488 numa_set_node(cpu, node);
489#endif
490}
491
492#define MSR_IA32_TME_ACTIVATE 0x982
493
494/* Helpers to access TME_ACTIVATE MSR */
495#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
496#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
497
498#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
499#define TME_ACTIVATE_POLICY_AES_XTS_128 0
500
501#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
502
503#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
504#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
505
506/* Values for mktme_status (SW only construct) */
507#define MKTME_ENABLED 0
508#define MKTME_DISABLED 1
509#define MKTME_UNINITIALIZED 2
510static int mktme_status = MKTME_UNINITIALIZED;
511
512static void detect_tme(struct cpuinfo_x86 *c)
513{
514 u64 tme_activate, tme_policy, tme_crypto_algs;
515 int keyid_bits = 0, nr_keyids = 0;
516 static u64 tme_activate_cpu0 = 0;
517
518 rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
519
520 if (mktme_status != MKTME_UNINITIALIZED) {
521 if (tme_activate != tme_activate_cpu0) {
522 /* Broken BIOS? */
523 pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
524 pr_err_once("x86/tme: MKTME is not usable\n");
525 mktme_status = MKTME_DISABLED;
526
527 /* Proceed. We may need to exclude bits from x86_phys_bits. */
528 }
529 } else {
530 tme_activate_cpu0 = tme_activate;
531 }
532
533 if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
534 pr_info_once("x86/tme: not enabled by BIOS\n");
535 mktme_status = MKTME_DISABLED;
536 return;
537 }
538
539 if (mktme_status != MKTME_UNINITIALIZED)
540 goto detect_keyid_bits;
541
542 pr_info("x86/tme: enabled by BIOS\n");
543
544 tme_policy = TME_ACTIVATE_POLICY(tme_activate);
545 if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
546 pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
547
548 tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
549 if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
550 pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
551 tme_crypto_algs);
552 mktme_status = MKTME_DISABLED;
553 }
554detect_keyid_bits:
555 keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
556 nr_keyids = (1UL << keyid_bits) - 1;
557 if (nr_keyids) {
558 pr_info_once("x86/mktme: enabled by BIOS\n");
559 pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
560 } else {
561 pr_info_once("x86/mktme: disabled by BIOS\n");
562 }
563
564 if (mktme_status == MKTME_UNINITIALIZED) {
565 /* MKTME is usable */
566 mktme_status = MKTME_ENABLED;
567 }
568
569 /*
570 * KeyID bits effectively lower the number of physical address
571 * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
572 */
573 c->x86_phys_bits -= keyid_bits;
574}
575
576static void init_cpuid_fault(struct cpuinfo_x86 *c)
577{
578 u64 msr;
579
580 if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
581 if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
582 set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
583 }
584}
585
586static void init_intel_misc_features(struct cpuinfo_x86 *c)
587{
588 u64 msr;
589
590 if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
591 return;
592
593 /* Clear all MISC features */
594 this_cpu_write(msr_misc_features_shadow, 0);
595
596 /* Check features and update capabilities and shadow control bits */
597 init_cpuid_fault(c);
598 probe_xeon_phi_r3mwait(c);
599
600 msr = this_cpu_read(msr_misc_features_shadow);
601 wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
602}
603
604static void split_lock_init(void);
605
606static void init_intel(struct cpuinfo_x86 *c)
607{
608 early_init_intel(c);
609
610 intel_workarounds(c);
611
612 /*
613 * Detect the extended topology information if available. This
614 * will reinitialise the initial_apicid which will be used
615 * in init_intel_cacheinfo()
616 */
617 detect_extended_topology(c);
618
619 if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
620 /*
621 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
622 * detection.
623 */
624 detect_num_cpu_cores(c);
625#ifdef CONFIG_X86_32
626 detect_ht(c);
627#endif
628 }
629
630 init_intel_cacheinfo(c);
631
632 if (c->cpuid_level > 9) {
633 unsigned eax = cpuid_eax(10);
634 /* Check for version and the number of counters */
635 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
636 set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
637 }
638
639 if (cpu_has(c, X86_FEATURE_XMM2))
640 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
641
642 if (boot_cpu_has(X86_FEATURE_DS)) {
643 unsigned int l1, l2;
644
645 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
646 if (!(l1 & (1<<11)))
647 set_cpu_cap(c, X86_FEATURE_BTS);
648 if (!(l1 & (1<<12)))
649 set_cpu_cap(c, X86_FEATURE_PEBS);
650 }
651
652 if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
653 (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
654 set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
655
656 if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
657 ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
658 set_cpu_bug(c, X86_BUG_MONITOR);
659
660#ifdef CONFIG_X86_64
661 if (c->x86 == 15)
662 c->x86_cache_alignment = c->x86_clflush_size * 2;
663 if (c->x86 == 6)
664 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
665#else
666 /*
667 * Names for the Pentium II/Celeron processors
668 * detectable only by also checking the cache size.
669 * Dixon is NOT a Celeron.
670 */
671 if (c->x86 == 6) {
672 unsigned int l2 = c->x86_cache_size;
673 char *p = NULL;
674
675 switch (c->x86_model) {
676 case 5:
677 if (l2 == 0)
678 p = "Celeron (Covington)";
679 else if (l2 == 256)
680 p = "Mobile Pentium II (Dixon)";
681 break;
682
683 case 6:
684 if (l2 == 128)
685 p = "Celeron (Mendocino)";
686 else if (c->x86_stepping == 0 || c->x86_stepping == 5)
687 p = "Celeron-A";
688 break;
689
690 case 8:
691 if (l2 == 128)
692 p = "Celeron (Coppermine)";
693 break;
694 }
695
696 if (p)
697 strcpy(c->x86_model_id, p);
698 }
699
700 if (c->x86 == 15)
701 set_cpu_cap(c, X86_FEATURE_P4);
702 if (c->x86 == 6)
703 set_cpu_cap(c, X86_FEATURE_P3);
704#endif
705
706 /* Work around errata */
707 srat_detect_node(c);
708
709 init_ia32_feat_ctl(c);
710
711 if (cpu_has(c, X86_FEATURE_TME))
712 detect_tme(c);
713
714 init_intel_misc_features(c);
715
716 if (tsx_ctrl_state == TSX_CTRL_ENABLE)
717 tsx_enable();
718 if (tsx_ctrl_state == TSX_CTRL_DISABLE)
719 tsx_disable();
720
721 split_lock_init();
722}
723
724#ifdef CONFIG_X86_32
725static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
726{
727 /*
728 * Intel PIII Tualatin. This comes in two flavours.
729 * One has 256kb of cache, the other 512. We have no way
730 * to determine which, so we use a boottime override
731 * for the 512kb model, and assume 256 otherwise.
732 */
733 if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
734 size = 256;
735
736 /*
737 * Intel Quark SoC X1000 contains a 4-way set associative
738 * 16K cache with a 16 byte cache line and 256 lines per tag
739 */
740 if ((c->x86 == 5) && (c->x86_model == 9))
741 size = 16;
742 return size;
743}
744#endif
745
746#define TLB_INST_4K 0x01
747#define TLB_INST_4M 0x02
748#define TLB_INST_2M_4M 0x03
749
750#define TLB_INST_ALL 0x05
751#define TLB_INST_1G 0x06
752
753#define TLB_DATA_4K 0x11
754#define TLB_DATA_4M 0x12
755#define TLB_DATA_2M_4M 0x13
756#define TLB_DATA_4K_4M 0x14
757
758#define TLB_DATA_1G 0x16
759
760#define TLB_DATA0_4K 0x21
761#define TLB_DATA0_4M 0x22
762#define TLB_DATA0_2M_4M 0x23
763
764#define STLB_4K 0x41
765#define STLB_4K_2M 0x42
766
767static const struct _tlb_table intel_tlb_table[] = {
768 { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
769 { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" },
770 { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
771 { 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
772 { 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
773 { 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
774 { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages" },
775 { 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
776 { 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
777 { 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
778 { 0x55, TLB_INST_2M_4M, 7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
779 { 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
780 { 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" },
781 { 0x59, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, fully associative" },
782 { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
783 { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" },
784 { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
785 { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
786 { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
787 { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
788 { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
789 { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
790 { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
791 { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
792 { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
793 { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
794 { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" },
795 { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" },
796 { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" },
797 { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" },
798 { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" },
799 { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
800 { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
801 { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
802 { 0xc2, TLB_DATA_2M_4M, 16, " TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
803 { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
804 { 0x00, 0, 0 }
805};
806
static void intel_tlb_lookup(const unsigned char desc)
{
        unsigned char k;

        if (desc == 0)
                return;

        /* look up this descriptor in the table */
        for (k = 0; intel_tlb_table[k].descriptor != desc &&
                    intel_tlb_table[k].descriptor != 0; k++)
                ;

        if (intel_tlb_table[k].tlb_type == 0)
                return;

        switch (intel_tlb_table[k].tlb_type) {
        case STLB_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case STLB_4K_2M:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_ALL:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4M:
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_2M_4M:
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K:
        case TLB_DATA0_4K:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4M:
        case TLB_DATA0_4M:
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_2M_4M:
        case TLB_DATA0_2M_4M:
                if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K_4M:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_1G:
                if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
                break;
        }
}

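/*
 * Decode CPUID leaf 0x2. The low byte of EAX is the number of times
 * CPUID(2) must be executed to obtain all descriptors; every other byte
 * of EAX..EDX holds one TLB/cache descriptor. A register with bit 31
 * set carries no valid descriptors and is ignored.
 */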
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
        int i, j, n;
        unsigned int regs[4];
        unsigned char *desc = (unsigned char *)regs;

        if (c->cpuid_level < 2)
                return;

        /* Number of times to iterate */
        n = cpuid_eax(2) & 0xFF;

        for (i = 0 ; i < n ; i++) {
                cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                /* If bit 31 is set, this is an unknown format */
                for (j = 0 ; j < 3 ; j++)
                        if (regs[j] & (1 << 31))
                                regs[j] = 0;

                /* Byte 0 is level count, not a descriptor */
                for (j = 1 ; j < 16 ; j++)
                        intel_tlb_lookup(desc[j]);
        }
}

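/*
 * Vendor callbacks hooked into the common x86 CPU identification code.
 * The legacy_models tables provide human readable names for old parts
 * that predate the CPUID brand string.
 */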
static const struct cpu_dev intel_cpu_dev = {
        .c_vendor = "Intel",
        .c_ident = { "GenuineIntel" },
#ifdef CONFIG_X86_32
        .legacy_models = {
                { .family = 4, .model_names =
                        {
                                [0] = "486 DX-25/33",
                                [1] = "486 DX-50",
                                [2] = "486 SX",
                                [3] = "486 DX/2",
                                [4] = "486 SL",
                                [5] = "486 SX/2",
                                [7] = "486 DX/2-WB",
                                [8] = "486 DX/4",
                                [9] = "486 DX/4-WB"
                        }
                },
                { .family = 5, .model_names =
                        {
                                [0] = "Pentium 60/66 A-step",
                                [1] = "Pentium 60/66",
                                [2] = "Pentium 75 - 200",
                                [3] = "OverDrive PODP5V83",
                                [4] = "Pentium MMX",
                                [7] = "Mobile Pentium 75 - 200",
                                [8] = "Mobile Pentium MMX",
                                [9] = "Quark SoC X1000",
                        }
                },
                { .family = 6, .model_names =
                        {
                                [0] = "Pentium Pro A-step",
                                [1] = "Pentium Pro",
                                [3] = "Pentium II (Klamath)",
                                [4] = "Pentium II (Deschutes)",
                                [5] = "Pentium II (Deschutes)",
                                [6] = "Mobile Pentium II",
                                [7] = "Pentium III (Katmai)",
                                [8] = "Pentium III (Coppermine)",
                                [10] = "Pentium III (Cascades)",
                                [11] = "Pentium III (Tualatin)",
                        }
                },
                { .family = 15, .model_names =
                        {
                                [0] = "Pentium 4 (Unknown)",
                                [1] = "Pentium 4 (Willamette)",
                                [2] = "Pentium 4 (Northwood)",
                                [4] = "Pentium 4 (Foster)",
                                [5] = "Pentium 4 (Foster)",
                        }
                },
        },
        .legacy_cache_size = intel_size_cache,
#endif
        .c_detect_tlb = intel_detect_tlb,
        .c_early_init = early_init_intel,
        .c_bsp_init = bsp_init_intel,
        .c_init = init_intel,
        .c_x86_vendor = X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);

#undef pr_fmt
#define pr_fmt(fmt) "x86/split lock detection: " fmt

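/*
 * Valid arguments for the "split_lock_detect=" kernel command line
 * parameter, e.g. "split_lock_detect=fatal". Parsed in
 * split_lock_setup() below.
 */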
static const struct {
        const char *option;
        enum split_lock_detect_state state;
} sld_options[] __initconst = {
        { "off",   sld_off   },
        { "warn",  sld_warn  },
        { "fatal", sld_fatal },
};

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

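/*
 * Toggle the split lock detect bit in MSR_TEST_CTRL to @on and read the
 * MSR back to verify that the write stuck. Returns false if the MSR
 * cannot be read or written, or if the readback does not match.
 */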
static bool split_lock_verify_msr(bool on)
{
        u64 ctrl, tmp;

        if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
                return false;
        if (on)
                ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
        else
                ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
        if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
                return false;
        rdmsrl(MSR_TEST_CTRL, tmp);
        return ctrl == tmp;
}

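/*
 * Boot time setup: bail out if MSR_TEST_CTRL is not usable, apply any
 * "split_lock_detect=" command line override (the default is "warn"),
 * cache the original MSR value for sld_update_msr() and advertise
 * X86_FEATURE_SPLIT_LOCK_DETECT.
 */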
static void __init split_lock_setup(void)
{
        enum split_lock_detect_state state = sld_warn;
        char arg[20];
        int i, ret;

        if (!split_lock_verify_msr(false)) {
                pr_info("MSR access failed: Disabled\n");
                return;
        }

        ret = cmdline_find_option(boot_command_line, "split_lock_detect",
                                  arg, sizeof(arg));
        if (ret >= 0) {
                for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
                        if (match_option(arg, ret, sld_options[i].option)) {
                                state = sld_options[i].state;
                                break;
                        }
                }
        }

        switch (state) {
        case sld_off:
                pr_info("disabled\n");
                return;
        case sld_warn:
                pr_info("warning about user-space split_locks\n");
                break;
        case sld_fatal:
                pr_info("sending SIGBUS on user-space split_locks\n");
                break;
        }

        rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

        if (!split_lock_verify_msr(true)) {
                pr_info("MSR access failed: Disabled\n");
                return;
        }

        sld_state = state;
        setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
}

/*
 * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
 * is not implemented as one thread could undo the setting of the other
 * thread immediately after dropping the lock anyway.
 */
static void sld_update_msr(bool on)
{
        u64 test_ctrl_val = msr_test_ctrl_cache;

        if (on)
                test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

        wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
}

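/*
 * Per CPU init: bring the MSR in line with the globally selected
 * sld_state, provided the CPU model is known to support it.
 */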
static void split_lock_init(void)
{
        if (cpu_model_supports_sld)
                split_lock_verify_msr(sld_state != sld_off);
}

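/*
 * Rate-limited warning path shared by the user and guest #AC handlers:
 * log the offender, then disable detection for the current task so it
 * can make progress past the split lock.
 */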
static void split_lock_warn(unsigned long ip)
{
        pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
                            current->comm, current->pid, ip);

        /*
         * Disable the split lock detection for this task so it can make
         * progress and set TIF_SLD so the detection is re-enabled via
         * switch_to_sld() when the task is scheduled out.
         */
        sld_update_msr(false);
        set_tsk_thread_flag(current, TIF_SLD);
}

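/*
 * Exported for hypervisor code (e.g. KVM) that intercepts an alignment
 * check exception raised by a guest while split lock detection is
 * enabled. Returns true if the event was downgraded to a warning and
 * the guest can continue; otherwise a SIGBUS is queued for the current
 * task and false is returned.
 */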
bool handle_guest_split_lock(unsigned long ip)
{
        if (sld_state == sld_warn) {
                split_lock_warn(ip);
                return true;
        }

        pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
                     current->comm, current->pid,
                     sld_state == sld_fatal ? "fatal" : "bogus", ip);

        current->thread.error_code = 0;
        current->thread.trap_nr = X86_TRAP_AC;
        force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
        return false;
}
EXPORT_SYMBOL_GPL(handle_guest_split_lock);

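/*
 * Invoked from the #AC exception handler. A set EFLAGS.AC means this is
 * a genuine user space alignment check rather than a split lock; in
 * that case, or in fatal mode, return false so the caller delivers the
 * signal.
 */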
bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
        if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
                return false;
        split_lock_warn(regs->ip);
        return true;
}

/*
 * This function is called only when switching between tasks with
 * different split-lock detection modes. It sets the MSR for the
 * mode of the new task. This is right most of the time, but since
 * the MSR is shared by hyperthreads on a physical core there can
 * be glitches when the two threads need different modes.
 */
void switch_to_sld(unsigned long tifn)
{
        sld_update_msr(!(tifn & _TIF_SLD));
}

/*
 * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
 * only be trusted if it is confirmed that a CPU model implements a
 * specific feature at a particular bit position.
 *
 * The possible driver data field values:
 *
 * - 0: CPU models that are known to have the per-core split-lock detection
 *      feature even though they do not enumerate IA32_CORE_CAPABILITIES.
 *
 * - 1: CPU models which may enumerate IA32_CORE_CAPABILITIES and if so use
 *      bit 5 to enumerate the per-core split-lock detection feature.
 */
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, 1),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, 1),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, 1),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, 1),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 1),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 1),
        {}
};

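/*
 * Match the boot CPU against the table above and, if split lock
 * detection is available, run the one time setup. Skipped under
 * hypervisors, where MSR_TEST_CTRL is typically not exposed.
 */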
void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
{
        const struct x86_cpu_id *m;
        u64 ia32_core_caps;

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return;

        m = x86_match_cpu(split_lock_cpu_ids);
        if (!m)
                return;

        switch (m->driver_data) {
        case 0:
                break;
        case 1:
                if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
                        return;
                rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
                if (!(ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT))
                        return;
                break;
        default:
                return;
        }

        cpu_model_supports_sld = true;
        split_lock_setup();
}