// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/set_memory.h>
#endif

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}
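
/*
 * Both rdmsrl_amd_safe() above and wrmsrl_amd_safe() below wrap
 * {rd,wr}msr_safe_regs(), which takes all eight 32-bit GPRs as an array:
 * gprs[0]/gprs[2] carry the EAX/EDX halves of the MSR value, gprs[1] is
 * ECX (the MSR number) and gprs[7] is EDI. The 0x9c5a203a constant in EDI
 * is the access key K8 expects (historically commented as a "password")
 * before it honors accesses to these model-specific MSRs.
 */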

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
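			/*
			 * As encoded above: bit 0 enables write allocation
			 * and bits [15:1] hold the limit in 4 MB units, so
			 * the 508 MB cap gives ((508/4) << 1) | 1 == 0xff.
			 */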
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
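			/*
			 * New-style WHCR as encoded above: bit 16 enables
			 * write allocation and bits [31:22] hold the limit
			 * in 4 MB units; the 4092 MB cap gives
			 * (4092 >> 2) == 0x3ff in the limit field.
			 */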
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR 15 needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/*
	 * Only run the MP check below for secondary CPUs, i.e. when
	 * called from identify_secondary_cpu(); the boot CPU has
	 * cpu_index 0.
	 */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs has the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
	u32 cus_per_node;

	if (c->x86 >= 0x17)
		return;

	cus_per_node = c->x86_max_cores / nodes_per_socket;
	c->cpu_core_id %= cus_per_node;
}
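
/*
 * Worked example (assumed configuration): a two-node Fam15h package with
 * x86_max_cores == 8 gives cus_per_node == 4, so each node's cores end up
 * with cpu_core_id in the 0..3 range.
 */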

static void amd_get_topology_early(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}
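
/*
 * Per AMD's BKDG/PPR, CPUID Fn8000_001E EBX[15:8] encodes the number of
 * threads sharing a core (or a compute unit on pre-Zen parts) minus one,
 * hence the +1 above to obtain the sibling count.
 */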

/*
 * Fixup core topology information for
 *  (1) AMD multi-node processors
 *      Assumption: Number of cores in each internal node is the same.
 *  (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		node_id = ecx & 0xff;

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		cacheinfo_amd_init_llc_id(c, cpu, node_id);

	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		node_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = node_id;
	} else
		return;

	if (nodes_per_socket > 1) {
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		legacy_fixup_core_id(c);
	}
}

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the
 * cores. Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
}
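
/*
 * Worked example: with x86_coreid_bits == 3, an initial APIC ID of 0x2d
 * (0b101101) yields core id 5 (the low three bits) and socket id 5 (the
 * remaining high bits).
 */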

u16 amd_get_nb_id(int cpu)
{
	return per_cpu(cpu_llc_id, cpu);
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
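
/*
 * Example: CPUID Fn8000_0008 ECX[7:0] == 7 means 8 cores. If the
 * ApicIdCoreIdSize field in ECX[15:12] is zero, the loop above derives
 * bits == 3 as the smallest shift that covers all 8 cores.
 */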

static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			pr_debug("tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;
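		/*
		 * CPUID Fn8000_0005 EDX describes the L1 I-cache: bits
		 * [31:24] give the size in KB and [23:16] the
		 * associativity, so upperbit is the way size. E.g. a 64K
		 * 2-way cache (the usual Fam15h geometry) yields 32K,
		 * i.e. address bits [14:12] get randomized.
		 */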

		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
	 *	      SEV feature (set in scattered.c).
	 *
	 *   In all cases, since support for SME and SEV requires long mode,
	 *   don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_K8_SYSCFG, msr);
		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
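		/*
		 * CPUID Fn8000_001F EBX[11:6] is the physical address bit
		 * reduction in effect when memory encryption is enabled,
		 * e.g. a reported reduction of 5 turns 48 usable bits
		 * into 43.
		 */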

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		clear_cpu_cap(c, X86_FEATURE_SME);
clear_sev:
		clear_cpu_cap(c, X86_FEATURE_SEV);
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u64 value;
	u32 dummy;

	early_init_amd_mc(c);

#ifdef CONFIG_X86_32
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_K7);
#endif

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/*  Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);

	early_detect_mem_encrypt(c);

	/* Re-enable TopologyExtensions if switched off by BIOS */
	if (c->x86 == 0x15 &&
	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	amd_get_topology_early(c);
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG	0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
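	/*
	 * Bit 62 of this MSR masks bit 30 (RDRAND) in the ECX half of
	 * CPUID Fn0000_0001: the MSR's upper dword overlays ECX, so
	 * 32 + 30 == 62.
	 */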

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_zn(struct cpuinfo_x86 *c)
{
	set_cpu_cap(c, X86_FEATURE_ZEN);

#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif

	/*
	 * Fix erratum 1076: CPB feature bit not being set in CPUID.
	 * Always set it, except when running under a hypervisor.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
		set_cpu_cap(c, X86_FEATURE_CPB);
}

static void init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	case 0x17: init_amd_zn(c); break;
	}

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	amd_detect_cmp(c);
	amd_get_topology(c);
	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_F10H_DECFG,
			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have the APIC timer
	 * running in deep C-states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
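
/*
 * Worked example: AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf) packs to
 * 0x0f412fff. AMD_MODEL_RANGE_START() then extracts 0x412, i.e.
 * (model << 4) | stepping, matching the "ms" value that
 * cpu_has_amd_erratum() computes below for the range comparison.
 */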

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
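		/*
		 * The DR1-DR3 address mask MSRs are contiguous, so index
		 * off MSR_F16H_DR1_ADDR_MASK by (dr - 1) to reach the
		 * matching register.
		 */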
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}