// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/set_memory.h>
#endif

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR 15 needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/* Are we being called from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon > model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
	u32 cus_per_node;

	if (c->x86 >= 0x17)
		return;

	cus_per_node = c->x86_max_cores / nodes_per_socket;
	c->cpu_core_id %= cus_per_node;
}


static void amd_get_topology_early(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		node_id = ecx & 0xff;

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		cacheinfo_amd_init_llc_id(c, cpu, node_id);

	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		node_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = node_id;
	} else
		return;

	if (nodes_per_socket > 1) {
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		legacy_fixup_core_id(c);
	}
}

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
}

u16 amd_get_nb_id(int cpu)
{
	return per_cpu(cpu_llc_id, cpu);
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			pr_debug("tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask = (upperbit - 1) & PAGE_MASK;
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
	 *	      SEV feature (set in scattered.c).
	 *
	 *   In all cases, since support for SME and SEV requires long mode,
	 *   don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_K8_SYSCFG, msr);
		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		clear_cpu_cap(c, X86_FEATURE_SME);
clear_sev:
		clear_cpu_cap(c, X86_FEATURE_SEV);
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u64 value;
	u32 dummy;

	early_init_amd_mc(c);

#ifdef CONFIG_X86_32
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_K7);
#endif

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);

	early_detect_mem_encrypt(c);

	/* Re-enable TopologyExtensions if switched off by BIOS */
	if (c->x86 == 0x15 &&
	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	amd_get_topology_early(c);
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG	0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_zn(struct cpuinfo_x86 *c)
{
	set_cpu_cap(c, X86_FEATURE_ZEN);

#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif

	/*
	 * Fix erratum 1076: CPB feature bit not being set in CPUID.
	 * Always set it, except when running under a hypervisor.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
		set_cpu_cap(c, X86_FEATURE_CPB);
}

static void init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4: init_amd_k5(c); break;
	case 5: init_amd_k6(c); break;
	case 6: init_amd_k7(c); break;
	case 0xf: init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	case 0x17: init_amd_zn(c); break;
	}

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	amd_detect_cmp(c);
	amd_get_topology(c);
	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_F10H_DECFG,
			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have the APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));


static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}
1// SPDX-License-Identifier: GPL-2.0-only
2#include <linux/export.h>
3#include <linux/bitops.h>
4#include <linux/elf.h>
5#include <linux/mm.h>
6
7#include <linux/io.h>
8#include <linux/sched.h>
9#include <linux/sched/clock.h>
10#include <linux/random.h>
11#include <linux/topology.h>
12#include <asm/processor.h>
13#include <asm/apic.h>
14#include <asm/cacheinfo.h>
15#include <asm/cpu.h>
16#include <asm/spec-ctrl.h>
17#include <asm/smp.h>
18#include <asm/numa.h>
19#include <asm/pci-direct.h>
20#include <asm/delay.h>
21#include <asm/debugreg.h>
22#include <asm/resctrl.h>
23
24#ifdef CONFIG_X86_64
25# include <asm/mmconfig.h>
26# include <asm/set_memory.h>
27#endif
28
29#include "cpu.h"
30
31static const int amd_erratum_383[];
32static const int amd_erratum_400[];
33static const int amd_erratum_1054[];
34static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
35
36/*
37 * nodes_per_socket: Stores the number of nodes per socket.
38 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
39 * Node Identifiers[10:8]
40 */
41static u32 nodes_per_socket = 1;
42
43static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
44{
45 u32 gprs[8] = { 0 };
46 int err;
47
48 WARN_ONCE((boot_cpu_data.x86 != 0xf),
49 "%s should only be used on K8!\n", __func__);
50
51 gprs[1] = msr;
52 gprs[7] = 0x9c5a203a;
53
54 err = rdmsr_safe_regs(gprs);
55
56 *p = gprs[0] | ((u64)gprs[2] << 32);
57
58 return err;
59}
60
61static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
62{
63 u32 gprs[8] = { 0 };
64
65 WARN_ONCE((boot_cpu_data.x86 != 0xf),
66 "%s should only be used on K8!\n", __func__);
67
68 gprs[0] = (u32)val;
69 gprs[1] = msr;
70 gprs[2] = val >> 32;
71 gprs[7] = 0x9c5a203a;
72
73 return wrmsr_safe_regs(gprs);
74}
75
76/*
77 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
78 * misexecution of code under Linux. Owners of such processors should
79 * contact AMD for precise details and a CPU swap.
80 *
81 * See http://www.multimania.com/poulot/k6bug.html
82 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
83 * (Publication # 21266 Issue Date: August 1998)
84 *
85 * The following test is erm.. interesting. AMD neglected to up
86 * the chip setting when fixing the bug but they also tweaked some
87 * performance at the same time..
88 */
89
90#ifdef CONFIG_X86_32
91extern __visible void vide(void);
92__asm__(".text\n"
93 ".globl vide\n"
94 ".type vide, @function\n"
95 ".align 4\n"
96 "vide: ret\n");
97#endif
98
99static void init_amd_k5(struct cpuinfo_x86 *c)
100{
101#ifdef CONFIG_X86_32
102/*
103 * General Systems BIOSen alias the cpu frequency registers
104 * of the Elan at 0x000df000. Unfortunately, one of the Linux
105 * drivers subsequently pokes it, and changes the CPU speed.
106 * Workaround : Remove the unneeded alias.
107 */
108#define CBAR (0xfffc) /* Configuration Base Address (32-bit) */
109#define CBAR_ENB (0x80000000)
110#define CBAR_KEY (0X000000CB)
111 if (c->x86_model == 9 || c->x86_model == 10) {
112 if (inl(CBAR) & CBAR_ENB)
113 outl(0 | CBAR_KEY, CBAR);
114 }
115#endif
116}
117
118static void init_amd_k6(struct cpuinfo_x86 *c)
119{
120#ifdef CONFIG_X86_32
121 u32 l, h;
122 int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
123
124 if (c->x86_model < 6) {
125 /* Based on AMD doc 20734R - June 2000 */
126 if (c->x86_model == 0) {
127 clear_cpu_cap(c, X86_FEATURE_APIC);
128 set_cpu_cap(c, X86_FEATURE_PGE);
129 }
130 return;
131 }
132
133 if (c->x86_model == 6 && c->x86_stepping == 1) {
134 const int K6_BUG_LOOP = 1000000;
135 int n;
136 void (*f_vide)(void);
137 u64 d, d2;
138
139 pr_info("AMD K6 stepping B detected - ");
140
141 /*
142 * It looks like AMD fixed the 2.6.2 bug and improved indirect
143 * calls at the same time.
144 */
145
146 n = K6_BUG_LOOP;
147 f_vide = vide;
148 OPTIMIZER_HIDE_VAR(f_vide);
149 d = rdtsc();
150 while (n--)
151 f_vide();
152 d2 = rdtsc();
153 d = d2-d;
154
155 if (d > 20*K6_BUG_LOOP)
156 pr_cont("system stability may be impaired when more than 32 MB are used.\n");
157 else
158 pr_cont("probably OK (after B9730xxxx).\n");
159 }
160
161 /* K6 with old style WHCR */
162 if (c->x86_model < 8 ||
163 (c->x86_model == 8 && c->x86_stepping < 8)) {
164 /* We can only write allocate on the low 508Mb */
165 if (mbytes > 508)
166 mbytes = 508;
167
168 rdmsr(MSR_K6_WHCR, l, h);
169 if ((l&0x0000FFFF) == 0) {
170 unsigned long flags;
171 l = (1<<0)|((mbytes/4)<<1);
172 local_irq_save(flags);
173 wbinvd();
174 wrmsr(MSR_K6_WHCR, l, h);
175 local_irq_restore(flags);
176 pr_info("Enabling old style K6 write allocation for %d Mb\n",
177 mbytes);
178 }
179 return;
180 }
181
182 if ((c->x86_model == 8 && c->x86_stepping > 7) ||
183 c->x86_model == 9 || c->x86_model == 13) {
184 /* The more serious chips .. */
185
186 if (mbytes > 4092)
187 mbytes = 4092;
188
189 rdmsr(MSR_K6_WHCR, l, h);
190 if ((l&0xFFFF0000) == 0) {
191 unsigned long flags;
192 l = ((mbytes>>2)<<22)|(1<<16);
193 local_irq_save(flags);
194 wbinvd();
195 wrmsr(MSR_K6_WHCR, l, h);
196 local_irq_restore(flags);
197 pr_info("Enabling new style K6 write allocation for %d Mb\n",
198 mbytes);
199 }
200
201 return;
202 }
203
204 if (c->x86_model == 10) {
205 /* AMD Geode LX is model 10 */
206 /* placeholder for any needed mods */
207 return;
208 }
209#endif
210}
211
212static void init_amd_k7(struct cpuinfo_x86 *c)
213{
214#ifdef CONFIG_X86_32
215 u32 l, h;
216
217 /*
218 * Bit 15 of Athlon specific MSR 15, needs to be 0
219 * to enable SSE on Palomino/Morgan/Barton CPU's.
220 * If the BIOS didn't enable it already, enable it here.
221 */
222 if (c->x86_model >= 6 && c->x86_model <= 10) {
223 if (!cpu_has(c, X86_FEATURE_XMM)) {
224 pr_info("Enabling disabled K7/SSE Support.\n");
225 msr_clear_bit(MSR_K7_HWCR, 15);
226 set_cpu_cap(c, X86_FEATURE_XMM);
227 }
228 }
229
230 /*
231 * It's been determined by AMD that Athlons since model 8 stepping 1
232 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
233 * As per AMD technical note 27212 0.2
234 */
235 if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
236 rdmsr(MSR_K7_CLK_CTL, l, h);
237 if ((l & 0xfff00000) != 0x20000000) {
238 pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
239 l, ((l & 0x000fffff)|0x20000000));
240 wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
241 }
242 }
243
244 /* calling is from identify_secondary_cpu() ? */
245 if (!c->cpu_index)
246 return;
247
248 /*
249 * Certain Athlons might work (for various values of 'work') in SMP
250 * but they are not certified as MP capable.
251 */
252 /* Athlon 660/661 is valid. */
253 if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
254 (c->x86_stepping == 1)))
255 return;
256
257 /* Duron 670 is valid */
258 if ((c->x86_model == 7) && (c->x86_stepping == 0))
259 return;
260
261 /*
262 * Athlon 662, Duron 671, and Athlon >model 7 have capability
263 * bit. It's worth noting that the A5 stepping (662) of some
264 * Athlon XP's have the MP bit set.
265 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
266 * more.
267 */
268 if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
269 ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
270 (c->x86_model > 7))
271 if (cpu_has(c, X86_FEATURE_MP))
272 return;
273
274 /* If we get here, not a certified SMP capable AMD system. */
275
276 /*
277 * Don't taint if we are running SMP kernel on a single non-MP
278 * approved Athlon
279 */
280 WARN_ONCE(1, "WARNING: This combination of AMD"
281 " processors is not suitable for SMP.\n");
282 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
283#endif
284}
285
286#ifdef CONFIG_NUMA
287/*
288 * To workaround broken NUMA config. Read the comment in
289 * srat_detect_node().
290 */
291static int nearby_node(int apicid)
292{
293 int i, node;
294
295 for (i = apicid - 1; i >= 0; i--) {
296 node = __apicid_to_node[i];
297 if (node != NUMA_NO_NODE && node_online(node))
298 return node;
299 }
300 for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
301 node = __apicid_to_node[i];
302 if (node != NUMA_NO_NODE && node_online(node))
303 return node;
304 }
305 return first_node(node_online_map); /* Shouldn't happen */
306}
307#endif
308
309/*
310 * Fix up cpu_core_id for pre-F17h systems to be in the
311 * [0 .. cores_per_node - 1] range. Not really needed but
312 * kept so as not to break existing setups.
313 */
314static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
315{
316 u32 cus_per_node;
317
318 if (c->x86 >= 0x17)
319 return;
320
321 cus_per_node = c->x86_max_cores / nodes_per_socket;
322 c->cpu_core_id %= cus_per_node;
323}
324
325/*
326 * Fixup core topology information for
327 * (1) AMD multi-node processors
328 * Assumption: Number of cores in each internal node is the same.
329 * (2) AMD processors supporting compute units
330 */
331static void amd_get_topology(struct cpuinfo_x86 *c)
332{
333 u8 node_id;
334 int cpu = smp_processor_id();
335
336 /* get information required for multi-node processors */
337 if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
338 int err;
339 u32 eax, ebx, ecx, edx;
340
341 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
342
343 node_id = ecx & 0xff;
344
345 if (c->x86 == 0x15)
346 c->cu_id = ebx & 0xff;
347
348 if (c->x86 >= 0x17) {
349 c->cpu_core_id = ebx & 0xff;
350
351 if (smp_num_siblings > 1)
352 c->x86_max_cores /= smp_num_siblings;
353 }
354
355 /*
356 * In case leaf B is available, use it to derive
357 * topology information.
358 */
359 err = detect_extended_topology(c);
360 if (!err)
361 c->x86_coreid_bits = get_count_order(c->x86_max_cores);
362
363 cacheinfo_amd_init_llc_id(c, cpu, node_id);
364
365 } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
366 u64 value;
367
368 rdmsrl(MSR_FAM10H_NODE_ID, value);
369 node_id = value & 7;
370
371 per_cpu(cpu_llc_id, cpu) = node_id;
372 } else
373 return;
374
375 if (nodes_per_socket > 1) {
376 set_cpu_cap(c, X86_FEATURE_AMD_DCM);
377 legacy_fixup_core_id(c);
378 }
379}
380
381/*
382 * On a AMD dual core setup the lower bits of the APIC id distinguish the cores.
383 * Assumes number of cores is a power of two.
384 */
385static void amd_detect_cmp(struct cpuinfo_x86 *c)
386{
387 unsigned bits;
388 int cpu = smp_processor_id();
389
390 bits = c->x86_coreid_bits;
391 /* Low order bits define the core id (index of core in socket) */
392 c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
393 /* Convert the initial APIC ID into the socket ID */
394 c->phys_proc_id = c->initial_apicid >> bits;
395 /* use socket ID also for last level cache */
396 per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
397}
398
399static void amd_detect_ppin(struct cpuinfo_x86 *c)
400{
401 unsigned long long val;
402
403 if (!cpu_has(c, X86_FEATURE_AMD_PPIN))
404 return;
405
406 /* When PPIN is defined in CPUID, still need to check PPIN_CTL MSR */
407 if (rdmsrl_safe(MSR_AMD_PPIN_CTL, &val))
408 goto clear_ppin;
409
410 /* PPIN is locked in disabled mode, clear feature bit */
411 if ((val & 3UL) == 1UL)
412 goto clear_ppin;
413
414 /* If PPIN is disabled, try to enable it */
415 if (!(val & 2UL)) {
416 wrmsrl_safe(MSR_AMD_PPIN_CTL, val | 2UL);
417 rdmsrl_safe(MSR_AMD_PPIN_CTL, &val);
418 }
419
420 /* If PPIN_EN bit is 1, return from here; otherwise fall through */
421 if (val & 2UL)
422 return;
423
424clear_ppin:
425 clear_cpu_cap(c, X86_FEATURE_AMD_PPIN);
426}
427
428u16 amd_get_nb_id(int cpu)
429{
430 return per_cpu(cpu_llc_id, cpu);
431}
432EXPORT_SYMBOL_GPL(amd_get_nb_id);
433
434u32 amd_get_nodes_per_socket(void)
435{
436 return nodes_per_socket;
437}
438EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
439
440static void srat_detect_node(struct cpuinfo_x86 *c)
441{
442#ifdef CONFIG_NUMA
443 int cpu = smp_processor_id();
444 int node;
445 unsigned apicid = c->apicid;
446
447 node = numa_cpu_node(cpu);
448 if (node == NUMA_NO_NODE)
449 node = per_cpu(cpu_llc_id, cpu);
450
451 /*
452 * On multi-fabric platform (e.g. Numascale NumaChip) a
453 * platform-specific handler needs to be called to fixup some
454 * IDs of the CPU.
455 */
456 if (x86_cpuinit.fixup_cpu_id)
457 x86_cpuinit.fixup_cpu_id(c, node);
458
459 if (!node_online(node)) {
460 /*
461 * Two possibilities here:
462 *
463 * - The CPU is missing memory and no node was created. In
464 * that case try picking one from a nearby CPU.
465 *
466 * - The APIC IDs differ from the HyperTransport node IDs
467 * which the K8 northbridge parsing fills in. Assume
468 * they are all increased by a constant offset, but in
469 * the same order as the HT nodeids. If that doesn't
470 * result in a usable node fall back to the path for the
471 * previous case.
472 *
473 * This workaround operates directly on the mapping between
474 * APIC ID and NUMA node, assuming certain relationship
475 * between APIC ID, HT node ID and NUMA topology. As going
476 * through CPU mapping may alter the outcome, directly
477 * access __apicid_to_node[].
478 */
479 int ht_nodeid = c->initial_apicid;
480
481 if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
482 node = __apicid_to_node[ht_nodeid];
483 /* Pick a nearby node */
484 if (!node_online(node))
485 node = nearby_node(apicid);
486 }
487 numa_set_node(cpu, node);
488#endif
489}
490
491static void early_init_amd_mc(struct cpuinfo_x86 *c)
492{
493#ifdef CONFIG_SMP
494 unsigned bits, ecx;
495
496 /* Multi core CPU? */
497 if (c->extended_cpuid_level < 0x80000008)
498 return;
499
500 ecx = cpuid_ecx(0x80000008);
501
502 c->x86_max_cores = (ecx & 0xff) + 1;
503
504 /* CPU telling us the core id bits shift? */
505 bits = (ecx >> 12) & 0xF;
506
507 /* Otherwise recompute */
508 if (bits == 0) {
509 while ((1 << bits) < c->x86_max_cores)
510 bits++;
511 }
512
513 c->x86_coreid_bits = bits;
514#endif
515}
516
517static void bsp_init_amd(struct cpuinfo_x86 *c)
518{
519
520#ifdef CONFIG_X86_64
521 if (c->x86 >= 0xf) {
522 unsigned long long tseg;
523
524 /*
525 * Split up direct mapping around the TSEG SMM area.
526 * Don't do it for gbpages because there seems very little
527 * benefit in doing so.
528 */
529 if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
530 unsigned long pfn = tseg >> PAGE_SHIFT;
531
532 pr_debug("tseg: %010llx\n", tseg);
533 if (pfn_range_is_mapped(pfn, pfn + 1))
534 set_memory_4k((unsigned long)__va(tseg), 1);
535 }
536 }
537#endif
538
539 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
540
541 if (c->x86 > 0x10 ||
542 (c->x86 == 0x10 && c->x86_model >= 0x2)) {
543 u64 val;
544
545 rdmsrl(MSR_K7_HWCR, val);
546 if (!(val & BIT(24)))
547 pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
548 }
549 }
550
551 if (c->x86 == 0x15) {
552 unsigned long upperbit;
553 u32 cpuid, assoc;
554
555 cpuid = cpuid_edx(0x80000005);
556 assoc = cpuid >> 16 & 0xff;
557 upperbit = ((cpuid >> 24) << 10) / assoc;
558
559 va_align.mask = (upperbit - 1) & PAGE_MASK;
560 va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
561
562 /* A random value per boot for bit slice [12:upper_bit) */
563 va_align.bits = get_random_int() & va_align.mask;
564 }
565
566 if (cpu_has(c, X86_FEATURE_MWAITX))
567 use_mwaitx_delay();
568
569 if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
570 u32 ecx;
571
572 ecx = cpuid_ecx(0x8000001e);
573 nodes_per_socket = ((ecx >> 8) & 7) + 1;
574 } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
575 u64 value;
576
577 rdmsrl(MSR_FAM10H_NODE_ID, value);
578 nodes_per_socket = ((value >> 3) & 7) + 1;
579 }
580
581 if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
582 !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
583 c->x86 >= 0x15 && c->x86 <= 0x17) {
584 unsigned int bit;
585
586 switch (c->x86) {
587 case 0x15: bit = 54; break;
588 case 0x16: bit = 33; break;
589 case 0x17: bit = 10; break;
590 default: return;
591 }
592 /*
593 * Try to cache the base value so further operations can
594 * avoid RMW. If that faults, do not enable SSBD.
595 */
596 if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
597 setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
598 setup_force_cpu_cap(X86_FEATURE_SSBD);
599 x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
600 }
601 }
602
603 resctrl_cpu_detect(c);
604}
605
606static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
607{
608 u64 msr;
609
610 /*
611 * BIOS support is required for SME and SEV.
612 * For SME: If BIOS has enabled SME then adjust x86_phys_bits by
613 * the SME physical address space reduction value.
614 * If BIOS has not enabled SME then don't advertise the
615 * SME feature (set in scattered.c).
616 * For SEV: If BIOS has not enabled SEV then don't advertise the
617 * SEV feature (set in scattered.c).
618 *
619 * In all cases, since support for SME and SEV requires long mode,
620 * don't advertise the feature under CONFIG_X86_32.
621 */
622 if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
623 /* Check if memory encryption is enabled */
624 rdmsrl(MSR_K8_SYSCFG, msr);
625 if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
626 goto clear_all;
627
628 /*
629 * Always adjust physical address bits. Even though this
630 * will be a value above 32-bits this is still done for
631 * CONFIG_X86_32 so that accurate values are reported.
632 */
633 c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
634
635 if (IS_ENABLED(CONFIG_X86_32))
636 goto clear_all;
637
638 rdmsrl(MSR_K7_HWCR, msr);
639 if (!(msr & MSR_K7_HWCR_SMMLOCK))
640 goto clear_sev;
641
642 return;
643
644clear_all:
645 setup_clear_cpu_cap(X86_FEATURE_SME);
646clear_sev:
647 setup_clear_cpu_cap(X86_FEATURE_SEV);
648 }
649}
650
651static void early_init_amd(struct cpuinfo_x86 *c)
652{
653 u64 value;
654 u32 dummy;
655
656 early_init_amd_mc(c);
657
658#ifdef CONFIG_X86_32
659 if (c->x86 == 6)
660 set_cpu_cap(c, X86_FEATURE_K7);
661#endif
662
663 if (c->x86 >= 0xf)
664 set_cpu_cap(c, X86_FEATURE_K8);
665
666 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
667
668 /*
669 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
670 * with P/T states and does not stop in deep C-states
671 */
672 if (c->x86_power & (1 << 8)) {
673 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
674 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
675 }
676
677 /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
678 if (c->x86_power & BIT(12))
679 set_cpu_cap(c, X86_FEATURE_ACC_POWER);
680
681#ifdef CONFIG_X86_64
682 set_cpu_cap(c, X86_FEATURE_SYSCALL32);
683#else
684 /* Set MTRR capability flag if appropriate */
685 if (c->x86 == 5)
686 if (c->x86_model == 13 || c->x86_model == 9 ||
687 (c->x86_model == 8 && c->x86_stepping >= 8))
688 set_cpu_cap(c, X86_FEATURE_K6_MTRR);
689#endif
690#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
691 /*
692 * ApicID can always be treated as an 8-bit value for AMD APIC versions
693 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
694 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
695 * after 16h.
696 */
697 if (boot_cpu_has(X86_FEATURE_APIC)) {
698 if (c->x86 > 0x16)
699 set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
700 else if (c->x86 >= 0xf) {
701 /* check CPU config space for extended APIC ID */
702 unsigned int val;
703
704 val = read_pci_config(0, 24, 0, 0x68);
705 if ((val >> 17 & 0x3) == 0x3)
706 set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
707 }
708 }
709#endif
710
711 /*
712 * This is only needed to tell the kernel whether to use VMCALL
713 * and VMMCALL. VMMCALL is never executed except under virt, so
714 * we can set it unconditionally.
715 */
716 set_cpu_cap(c, X86_FEATURE_VMMCALL);
717
718 /* F16h erratum 793, CVE-2013-6885 */
719 if (c->x86 == 0x16 && c->x86_model <= 0xf)
720 msr_set_bit(MSR_AMD64_LS_CFG, 15);
721
722 /*
723 * Check whether the machine is affected by erratum 400. This is
724 * used to select the proper idle routine and to enable the check
725 * whether the machine is affected in arch_post_acpi_init(), which
726 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
727 */
728 if (cpu_has_amd_erratum(c, amd_erratum_400))
729 set_cpu_bug(c, X86_BUG_AMD_E400);
730
731 early_detect_mem_encrypt(c);
732
733 /* Re-enable TopologyExtensions if switched off by BIOS */
734 if (c->x86 == 0x15 &&
735 (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
736 !cpu_has(c, X86_FEATURE_TOPOEXT)) {
737
738 if (msr_set_bit(0xc0011005, 54) > 0) {
739 rdmsrl(0xc0011005, value);
740 if (value & BIT_64(54)) {
741 set_cpu_cap(c, X86_FEATURE_TOPOEXT);
742 pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
743 }
744 }
745 }
746
747 if (cpu_has(c, X86_FEATURE_TOPOEXT))
748 smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
749}
750
751static void init_amd_k8(struct cpuinfo_x86 *c)
752{
753 u32 level;
754 u64 value;
755
756 /* On C+ stepping K8 rep microcode works well for copy/memset */
757 level = cpuid_eax(1);
758 if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
759 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
760
761 /*
762 * Some BIOSes incorrectly force this feature, but only K8 revision D
763 * (model = 0x14) and later actually support it.
764 * (AMD Erratum #110, docId: 25759).
765 */
766 if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
767 clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
768 if (!rdmsrl_amd_safe(0xc001100d, &value)) {
769 value &= ~BIT_64(32);
770 wrmsrl_amd_safe(0xc001100d, value);
771 }
772 }
773
774 if (!c->x86_model_id[0])
775 strcpy(c->x86_model_id, "Hammer");
776
777#ifdef CONFIG_SMP
778 /*
779 * Disable TLB flush filter by setting HWCR.FFDIS on K8
780 * bit 6 of msr C001_0015
781 *
782 * Errata 63 for SH-B3 steppings
783 * Errata 122 for all steppings (F+ have it disabled by default)
784 */
785 msr_set_bit(MSR_K7_HWCR, 6);
786#endif
787 set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
788}
789
790static void init_amd_gh(struct cpuinfo_x86 *c)
791{
792#ifdef CONFIG_MMCONF_FAM10H
793 /* do this for boot cpu */
794 if (c == &boot_cpu_data)
795 check_enable_amd_mmconf_dmi();
796
797 fam10h_check_enable_mmcfg();
798#endif
799
800 /*
801 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
802 * is always needed when GART is enabled, even in a kernel which has no
803 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
804 * If it doesn't, we do it here as suggested by the BKDG.
805 *
806 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
807 */
808 msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
809
810 /*
811 * On family 10h BIOS may not have properly enabled WC+ support, causing
812 * it to be converted to CD memtype. This may result in performance
813 * degradation for certain nested-paging guests. Prevent this conversion
814 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
815 *
816 * NOTE: we want to use the _safe accessors so as not to #GP kvm
817 * guests on older kvm hosts.
818 */
819 msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
820
821 if (cpu_has_amd_erratum(c, amd_erratum_383))
822 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
823}
824
825#define MSR_AMD64_DE_CFG 0xC0011029
826
827static void init_amd_ln(struct cpuinfo_x86 *c)
828{
829 /*
830 * Apply erratum 665 fix unconditionally so machines without a BIOS
831 * fix work.
832 */
833 msr_set_bit(MSR_AMD64_DE_CFG, 31);
834}
835
836static bool rdrand_force;
837
838static int __init rdrand_cmdline(char *str)
839{
840 if (!str)
841 return -EINVAL;
842
843 if (!strcmp(str, "force"))
844 rdrand_force = true;
845 else
846 return -EINVAL;
847
848 return 0;
849}
850early_param("rdrand", rdrand_cmdline);
851
852static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
853{
854 /*
855 * Saving of the MSR used to hide the RDRAND support during
856 * suspend/resume is done by arch/x86/power/cpu.c, which is
857 * dependent on CONFIG_PM_SLEEP.
858 */
859 if (!IS_ENABLED(CONFIG_PM_SLEEP))
860 return;
861
862 /*
863 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
864 * RDRAND support using the CPUID function directly.
865 */
866 if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
867 return;
868
869 msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
870
871 /*
872 * Verify that the CPUID change has occurred in case the kernel is
873 * running virtualized and the hypervisor doesn't support the MSR.
874 */
875 if (cpuid_ecx(1) & BIT(30)) {
876 pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
877 return;
878 }
879
880 clear_cpu_cap(c, X86_FEATURE_RDRAND);
881 pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
882}
883
884static void init_amd_jg(struct cpuinfo_x86 *c)
885{
886 /*
887 * Some BIOS implementations do not restore proper RDRAND support
888 * across suspend and resume. Check on whether to hide the RDRAND
889 * instruction support via CPUID.
890 */
891 clear_rdrand_cpuid_bit(c);
892}
893
894static void init_amd_bd(struct cpuinfo_x86 *c)
895{
896 u64 value;
897
898	/*
899	 * The way access filter in the L1 instruction cache can hurt
900	 * performance on some workloads. Disable it on the affected CPUs.
901	 */
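	/*
	 * Mask 0x1E covers bits 4:1 of MSR_F15H_IC_CFG; only write the MSR
	 * if none of them has already been set (e.g. by the BIOS).
	 */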
902 if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
903 if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
904 value |= 0x1E;
905 wrmsrl_safe(MSR_F15H_IC_CFG, value);
906 }
907 }
908
909 /*
910 * Some BIOS implementations do not restore proper RDRAND support
911	 * across suspend and resume. Check whether to hide the RDRAND
912 * instruction support via CPUID.
913 */
914 clear_rdrand_cpuid_bit(c);
915}
916
917static void init_amd_zn(struct cpuinfo_x86 *c)
918{
919 set_cpu_cap(c, X86_FEATURE_ZEN);
920
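	/*
	 * Zen-based systems typically report a SLIT distance of 32 between
	 * nodes in different sockets, just above the default RECLAIM_DISTANCE
	 * of 30, so raise node_reclaim_distance accordingly.
	 */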
921#ifdef CONFIG_NUMA
922 node_reclaim_distance = 32;
923#endif
924
925 /*
926 * Fix erratum 1076: CPB feature bit not being set in CPUID.
927 * Always set it, except when running under a hypervisor.
928 */
929 if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
930 set_cpu_cap(c, X86_FEATURE_CPB);
931}
932
933static void init_amd(struct cpuinfo_x86 *c)
934{
935 early_init_amd(c);
936
937	/*
938	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
939	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway.
940	 */
941 clear_cpu_cap(c, 0*32+31);
942
943 if (c->x86 >= 0x10)
944 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
945
946 /* get apicid instead of initial apic id from cpuid */
947 c->apicid = hard_smp_processor_id();
948
949	/* K6s report MCEs but don't actually have all the MSRs */
950 if (c->x86 < 6)
951 clear_cpu_cap(c, X86_FEATURE_MCE);
952
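	/*
	 * Per-family setup; the suffixes appear to follow AMD's usual
	 * codenames (k8 = Hammer, gh = Greyhound/Fam10h, ln = Llano,
	 * bd = Bulldozer, jg = Jaguar, zn = Zen).
	 */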
953 switch (c->x86) {
954 case 4: init_amd_k5(c); break;
955 case 5: init_amd_k6(c); break;
956 case 6: init_amd_k7(c); break;
957 case 0xf: init_amd_k8(c); break;
958 case 0x10: init_amd_gh(c); break;
959 case 0x12: init_amd_ln(c); break;
960 case 0x15: init_amd_bd(c); break;
961 case 0x16: init_amd_jg(c); break;
962 case 0x17: fallthrough;
963 case 0x19: init_amd_zn(c); break;
964 }
965
966	/*
967	 * Enable the workaround for the FXSAVE leak on CPUs
968	 * without the XSaveErPtr feature.
969	 */
970 if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
971 set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
972
973 cpu_detect_cache_sizes(c);
974
975 amd_detect_cmp(c);
976 amd_get_topology(c);
977 srat_detect_node(c);
978 amd_detect_ppin(c);
979
980 init_amd_cacheinfo(c);
981
982 if (cpu_has(c, X86_FEATURE_XMM2)) {
983 /*
984 * Use LFENCE for execution serialization. On families which
985 * don't have that MSR, LFENCE is already serializing.
986 * msr_set_bit() uses the safe accessors, too, even if the MSR
987 * is not present.
988 */
989 msr_set_bit(MSR_F10H_DECFG,
990 MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
991
992 /* A serializing LFENCE stops RDTSC speculation */
993 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
994 }
995
996	/*
997	 * Family 0x12 and above processors keep the APIC timer
998	 * running in deep C states.
999	 */
1000 if (c->x86 > 0x11)
1001 set_cpu_cap(c, X86_FEATURE_ARAT);
1002
1003 /* 3DNow or LM implies PREFETCHW */
1004 if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
1005 if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
1006 set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
1007
1008 /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
1009 if (!cpu_has(c, X86_FEATURE_XENPV))
1010 set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
1011
1012 /*
1013 * Turn on the Instructions Retired free counter on machines not
1014 * susceptible to erratum #1054 "Instructions Retired Performance
1015 * Counter May Be Inaccurate".
1016 */
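	/* Once enabled, the free-running count is read from MSR_F17H_IRPERF. */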
1017 if (cpu_has(c, X86_FEATURE_IRPERF) &&
1018 !cpu_has_amd_erratum(c, amd_erratum_1054))
1019 msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
1020}
1021
1022#ifdef CONFIG_X86_32
1023static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
1024{
1025 /* AMD errata T13 (order #21922) */
1026 if (c->x86 == 6) {
1027 /* Duron Rev A0 */
1028 if (c->x86_model == 3 && c->x86_stepping == 0)
1029 size = 64;
1030 /* Tbird rev A1/A2 */
1031 if (c->x86_model == 4 &&
1032 (c->x86_stepping == 0 || c->x86_stepping == 1))
1033 size = 256;
1034 }
1035 return size;
1036}
1037#endif
1038
1039static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
1040{
1041 u32 ebx, eax, ecx, edx;
1042 u16 mask = 0xfff;
1043
1044 if (c->x86 < 0xf)
1045 return;
1046
1047 if (c->extended_cpuid_level < 0x80000006)
1048 return;
1049
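	/*
	 * CPUID Fn8000_0006 describes the L2 TLB: EBX covers 4K pages and EAX
	 * 2M/4M pages, with the data-TLB entry count in bits 27:16 and the
	 * instruction-TLB entry count in bits 11:0 of each register (hence
	 * the 12-bit mask).
	 */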
1050 cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
1051
1052 tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
1053 tlb_lli_4k[ENTRIES] = ebx & mask;
1054
1055 /*
1056 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
1057 * characteristics from the CPUID function 0x80000005 instead.
1058 */
1059 if (c->x86 == 0xf) {
1060 cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1061 mask = 0xff;
1062 }
1063
1064 /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1065 if (!((eax >> 16) & mask))
1066 tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
1067 else
1068 tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
1069
1070 /* a 4M entry uses two 2M entries */
1071 tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
1072
1073 /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1074 if (!(eax & mask)) {
1075 /* Erratum 658 */
1076 if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
1077 tlb_lli_2m[ENTRIES] = 1024;
1078 } else {
1079 cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1080 tlb_lli_2m[ENTRIES] = eax & 0xff;
1081 }
1082 } else
1083 tlb_lli_2m[ENTRIES] = eax & mask;
1084
1085 tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
1086}
1087
1088static const struct cpu_dev amd_cpu_dev = {
1089 .c_vendor = "AMD",
1090 .c_ident = { "AuthenticAMD" },
1091#ifdef CONFIG_X86_32
1092 .legacy_models = {
1093 { .family = 4, .model_names =
1094 {
1095 [3] = "486 DX/2",
1096 [7] = "486 DX/2-WB",
1097 [8] = "486 DX/4",
1098 [9] = "486 DX/4-WB",
1099 [14] = "Am5x86-WT",
1100 [15] = "Am5x86-WB"
1101 }
1102 },
1103 },
1104 .legacy_cache_size = amd_size_cache,
1105#endif
1106 .c_early_init = early_init_amd,
1107 .c_detect_tlb = cpu_detect_tlb_amd,
1108 .c_bsp_init = bsp_init_amd,
1109 .c_init = init_amd,
1110 .c_x86_vendor = X86_VENDOR_AMD,
1111};
1112
1113cpu_dev_register(amd_cpu_dev);
1114
1115/*
1116 * AMD errata checking
1117 *
1118 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
1119 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
1120 * have an OSVW id assigned, which it takes as its first argument. Both take a
1121 * variable number of family-specific model-stepping ranges created by
1122 * AMD_MODEL_RANGE().
1123 *
1124 * Example:
1125 *
1126 * const int amd_erratum_319[] =
1127 * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
1128 * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
1129 * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
1130 */
1131
1132#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
1133#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
1134#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
1135 ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
1136#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
1137#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
1138#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
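/*
 * For instance, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf) used by
 * amd_erratum_1054 below packs to 0x170002ff: family 0x17 in bits 31:24,
 * starting model/stepping 0x000 in bits 23:12 and ending model/stepping
 * 0x2ff (model 0x2f, stepping 0xf) in bits 11:0.
 */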
1139
1140static const int amd_erratum_400[] =
1141 AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
1142 AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
1143
1144static const int amd_erratum_383[] =
1145 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
1146
1147/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
1148static const int amd_erratum_1054[] =
1149 AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
1150
1151static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
1152{
1153 int osvw_id = *erratum++;
1154 u32 range;
1155 u32 ms;
1156
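	/*
	 * If the CPU supports OSVW (OS Visible Workaround), the reported
	 * status is authoritative: MSR_AMD64_OSVW_ID_LENGTH holds the number
	 * of valid status bits, and the status bits live in the
	 * MSR_AMD64_OSVW_STATUS register array, 64 ids per MSR, hence the
	 * ">> 6" / "& 0x3f" indexing below.
	 */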
1157 if (osvw_id >= 0 && osvw_id < 65536 &&
1158 cpu_has(cpu, X86_FEATURE_OSVW)) {
1159 u64 osvw_len;
1160
1161 rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
1162 if (osvw_id < osvw_len) {
1163 u64 osvw_bits;
1164
1165 rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
1166 osvw_bits);
1167 return osvw_bits & (1ULL << (osvw_id & 0x3f));
1168 }
1169 }
1170
1171 /* OSVW unavailable or ID unknown, match family-model-stepping range */
1172 ms = (cpu->x86_model << 4) | cpu->x86_stepping;
1173 while ((range = *erratum++))
1174 if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
1175 (ms >= AMD_MODEL_RANGE_START(range)) &&
1176 (ms <= AMD_MODEL_RANGE_END(range)))
1177 return true;
1178
1179 return false;
1180}
1181
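/*
 * Program the breakpoint address mask MSRs provided by the BPEXT feature:
 * DR0 has its own MSR, while DR1-DR3 use consecutive MSRs starting at
 * MSR_F16H_DR1_ADDR_MASK, hence the "- 1 + dr" arithmetic below.
 */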
1182void set_dr_addr_mask(unsigned long mask, int dr)
1183{
1184 if (!boot_cpu_has(X86_FEATURE_BPEXT))
1185 return;
1186
1187 switch (dr) {
1188 case 0:
1189 wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
1190 break;
1191 case 1:
1192 case 2:
1193 case 3:
1194 wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
1195 break;
1196 default:
1197 break;
1198 }
1199}