#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_32
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 *	http://www.amd.com/K6/k6docs/revgd.html
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");

static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}

static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
			       "system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
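			/*
			 * Old-style WHCR layout, as derived from the write
			 * below: bit 0 enables write allocation and bits 7:1
			 * hold the limit in 4 MB units (mbytes/4).
			 */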
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
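			/*
			 * New-style WHCR layout, again derived from the write
			 * below: bit 16 enables write allocation and bits
			 * 31:22 hold the limit in 4 MB units.
			 */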
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
}

static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* Are we being called from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		goto valid_k7;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		goto valid_k7;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has_mp)
			goto valid_k7;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	if (!test_taint(TAINT_UNSAFE_SMP))
		add_taint(TAINT_UNSAFE_SMP);

valid_k7:
	;
#endif
}

static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR 15 needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}
#endif

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fixup core topology information for
 *  (1) AMD multi-node processors
 *      Assumption: Number of cores in each internal node is the same.
 *  (2) AMD processors supporting compute units
 */
#ifdef CONFIG_X86_HT
static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 nodes, cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
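		/*
		 * Per the BKDG, Fn8000_001E ECX[10:8] holds NodesPerProcessor
		 * minus one and ECX[7:0] holds the NodeId; only the low three
		 * bits of each are used here.
		 */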
		nodes = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}

int amd_get_nb_id(int cpu)
{
	int id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;
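		/*
		 * Bus 0, device 0x18, function 0 is the AMD northbridge;
		 * per the K8/Fam10h BKDG, offset 0x68 there is the Link
		 * Transaction Control register, and bits 17 and 18 enable
		 * extended APIC IDs.
		 */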
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif

	/* We need to do the following only once */
	if (c != &boot_cpu_data)
		return;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned long long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
	/* On C+ stepping K8 rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);

		/*
		 * Some BIOSes incorrectly force this feature, but only K8
		 * revision D (model = 0x14) and later actually support it.
		 * (AMD Erratum #110, docId: 25759).
		 */
		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
			u64 val;

			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
			if (!rdmsrl_amd_safe(0xc001100d, &val)) {
				val &= ~(1ULL << 32);
				wrmsrl_amd_safe(0xc001100d, val);
			}
		}

	}
	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();
#else

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6: /* An Athlon/Duron */
		init_amd_k7(c);
		break;
	}

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	if (!c->x86_model_id[0]) {
		switch (c->x86) {
		case 0xf:
			/* Should distinguish models here, but this is only
			   a fallback anyway. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* do this for boot cpu */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

	if (c == &boot_cpu_data && c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if ((tseg>>PMD_SHIFT) <
				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
				((tseg>>PMD_SHIFT) <
				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here
	 * because this is always needed when GART is enabled, even in a
	 * kernel which has no MCE support built in.
	 */
	if (c->x86 == 0x10) {
		/*
		 * BIOS should disable GartTlbWlk Errors itself. If
		 * it doesn't, do it here as suggested by the BKDG.
		 *
		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
		 */
		u64 mask;
		int err;

		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
		if (err == 0) {
			mask |= (1 << 10);
			checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
		}
	}
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
							unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
 * int[] in arch/x86/include/asm/processor.h.
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_400);

const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_383);

bool cpu_has_amd_erratum(const int *erratum)
{
	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	/*
	 * If called early enough that current_cpu_data hasn't been initialized
	 * yet, fall back to boot_cpu_data.
	 */
	if (cpu->x86 == 0)
		cpu = &boot_cpu_data;

	if (cpu->x86_vendor != X86_VENDOR_AMD)
		return false;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);

#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

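	/*
	 * rdmsr_safe_regs() takes an array of all eight GPRs: gprs[1]
	 * supplies ECX (the MSR number), the result comes back in gprs[0]
	 * and gprs[2] (EAX/EDX), and gprs[7] loads EDI with the passcode
	 * these vendor-specific K8 MSRs require.
	 */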
	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 * and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *	(Publication # 21266  Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

extern __visible void vide(void);
__asm__(".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR 15 needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	/* Are we being called from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fixup core topology information for
 *  (1) AMD multi-node processors
 *      Assumption: Number of cores in each internal node is the same.
 *  (2) AMD processors supporting compute units
 */
#ifdef CONFIG_SMP
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		node_id  = ecx & 0xff;
		smp_num_siblings = ((ebx >> 8) & 0xff) + 1;

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * We may have multiple LLCs if L3 caches exist, so check if we
		 * have an L3 cache by looking at the L3 cache CPUID leaf.
		 */
		if (cpuid_edx(0x80000006)) {
			if (c->x86 == 0x17) {
				/*
				 * LLC is at the core complex level.
				 * Core complex id is ApicId[3].
				 */
				per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
			} else {
				/* LLC is at the node level. */
				per_cpu(cpu_llc_id, cpu) = node_id;
			}
		}
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		node_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = node_id;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes_per_socket > 1) {
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cus_per_node = c->x86_max_cores / nodes_per_socket;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cus_per_node;
	}
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}

u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			pr_debug("tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

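		/*
		 * CPUID Fn8000_0005 EDX describes the L1 data cache: bits
		 * 31:24 give its size in KB and bits 23:16 its associativity,
		 * so upperbit below works out to the size in bytes of one
		 * cache way.
		 */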
		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	= (upperbit - 1) & PAGE_MASK;
		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG	0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	}

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET */
	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
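
/*
 * As a worked example of the packing above,
 * AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf) encodes family 0xf in bits
 * 31:24, the start model/stepping 0x412 in bits 23:12 and the end
 * model/stepping 0xfff in bits 11:0, matching the ms value computed in
 * cpu_has_amd_erratum() below.
 */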

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

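	/*
	 * OSVW (OS Visible Workaround) MSRs: ID_LENGTH gives the number of
	 * valid status bits, and status bits are packed 64 per MSR, so the
	 * MSR is selected by osvw_id >> 6 and the bit by osvw_id & 0x3f.
	 */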
	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

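	/*
	 * DR0 has its own address-mask MSR; the masks for DR1-DR3 live in
	 * consecutive MSRs starting at MSR_F16H_DR1_ADDR_MASK, hence the
	 * "- 1 + dr" arithmetic below.
	 */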
	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}