v3.5.6 (arch/x86/kernel/cpu/amd.c)
 
  1#include <linux/export.h>
  2#include <linux/init.h>
  3#include <linux/bitops.h>
  4#include <linux/elf.h>
  5#include <linux/mm.h>
  6
  7#include <linux/io.h>
  8#include <linux/sched.h>
  9#include <asm/processor.h>
 10#include <asm/apic.h>
 11#include <asm/cpu.h>
 12#include <asm/pci-direct.h>
 13
 14#ifdef CONFIG_X86_64
 15# include <asm/numa_64.h>
 16# include <asm/mmconfig.h>
 17# include <asm/cacheflush.h>
 18#endif
 19
 20#include "cpu.h"
 21
 22#ifdef CONFIG_X86_32
 23/*
 24 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 25 *	misexecution of code under Linux. Owners of such processors should
 26 *	contact AMD for precise details and a CPU swap.
 27 *
 28 *	See	http://www.multimania.com/poulot/k6bug.html
 29 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 30 *		(Publication # 21266  Issue Date: August 1998)
 31 *
 32 *	The following test is erm.. interesting. AMD neglected to up
 33 *	the chip setting when fixing the bug but they also tweaked some
 34 *	performance at the same time..
 35 */
 36
 37extern void vide(void);
 38__asm__(".align 4\nvide: ret");
 39
 40static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
 41{
 42/*
 43 * General Systems BIOSen alias the cpu frequency registers
 44 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 45 * drivers subsequently pokes it, and changes the CPU speed.
 46 * Workaround : Remove the unneeded alias.
 47 */
 48#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
 49#define CBAR_ENB	(0x80000000)
 50#define CBAR_KEY	(0X000000CB)
 51	if (c->x86_model == 9 || c->x86_model == 10) {
 52		if (inl(CBAR) & CBAR_ENB)
 53			outl(0 | CBAR_KEY, CBAR);
 54	}
 55}
 56
 57
 58static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 59{
 60	u32 l, h;
 61	int mbytes = num_physpages >> (20-PAGE_SHIFT);
 62
 63	if (c->x86_model < 6) {
 64		/* Based on AMD doc 20734R - June 2000 */
 65		if (c->x86_model == 0) {
 66			clear_cpu_cap(c, X86_FEATURE_APIC);
 67			set_cpu_cap(c, X86_FEATURE_PGE);
 68		}
 69		return;
 70	}
 71
 72	if (c->x86_model == 6 && c->x86_mask == 1) {
 73		const int K6_BUG_LOOP = 1000000;
 74		int n;
 75		void (*f_vide)(void);
 76		unsigned long d, d2;
 77
 78		printk(KERN_INFO "AMD K6 stepping B detected - ");
 79
 80		/*
 81		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
 82		 * calls at the same time.
 83		 */
 84
 85		n = K6_BUG_LOOP;
 86		f_vide = vide;
 87		rdtscl(d);
 88		while (n--)
 89			f_vide();
 90		rdtscl(d2);
 91		d = d2-d;
 92
 93		if (d > 20*K6_BUG_LOOP)
 94			printk(KERN_CONT
 95				"system stability may be impaired when more than 32 MB are used.\n");
 96		else
 97			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
 98	}
 99
100	/* K6 with old style WHCR */
101	if (c->x86_model < 8 ||
102	   (c->x86_model == 8 && c->x86_mask < 8)) {
103		/* We can only write allocate on the low 508Mb */
104		if (mbytes > 508)
105			mbytes = 508;
106
107		rdmsr(MSR_K6_WHCR, l, h);
108		if ((l&0x0000FFFF) == 0) {
109			unsigned long flags;
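    			/* Old-style WHCR layout (assumed from the 508 MB clamp above):
    			   bit 0 enables write allocation, bits 7:1 hold the limit
    			   in 4 MB units. */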
110			l = (1<<0)|((mbytes/4)<<1);
111			local_irq_save(flags);
112			wbinvd();
113			wrmsr(MSR_K6_WHCR, l, h);
114			local_irq_restore(flags);
115			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
116				mbytes);
117		}
118		return;
119	}
120
121	if ((c->x86_model == 8 && c->x86_mask > 7) ||
122	     c->x86_model == 9 || c->x86_model == 13) {
123		/* The more serious chips .. */
124
125		if (mbytes > 4092)
126			mbytes = 4092;
127
128		rdmsr(MSR_K6_WHCR, l, h);
129		if ((l&0xFFFF0000) == 0) {
130			unsigned long flags;
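    			/* New-style WHCR layout (assumed from the 4092 MB clamp above):
    			   bits 31:22 hold the limit in 4 MB units, bit 16 is the
    			   enable bit. */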
131			l = ((mbytes>>2)<<22)|(1<<16);
132			local_irq_save(flags);
133			wbinvd();
134			wrmsr(MSR_K6_WHCR, l, h);
135			local_irq_restore(flags);
136			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
137				mbytes);
138		}
139
140		return;
141	}
142
143	if (c->x86_model == 10) {
144		/* AMD Geode LX is model 10 */
145		/* placeholder for any needed mods */
146		return;
147	}
148}
149
150static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
151{
152	/* calling is from identify_secondary_cpu() ? */
153	if (!c->cpu_index)
154		return;
155
156	/*
157	 * Certain Athlons might work (for various values of 'work') in SMP
158	 * but they are not certified as MP capable.
159	 */
160	/* Athlon 660/661 is valid. */
161	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
162	    (c->x86_mask == 1)))
163		goto valid_k7;
164
165	/* Duron 670 is valid */
166	if ((c->x86_model == 7) && (c->x86_mask == 0))
167		goto valid_k7;
168
169	/*
 170	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
 171	 * bit. It's worth noting that the A5 stepping (662) of some
 172	 * Athlon XPs have the MP bit set.
173	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
174	 * more.
175	 */
176	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
177	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
178	     (c->x86_model > 7))
179		if (cpu_has_mp)
180			goto valid_k7;
181
182	/* If we get here, not a certified SMP capable AMD system. */
183
184	/*
 185	 * Don't taint if we are running an SMP kernel on a single non-MP
186	 * approved Athlon
187	 */
188	WARN_ONCE(1, "WARNING: This combination of AMD"
189		" processors is not suitable for SMP.\n");
190	if (!test_taint(TAINT_UNSAFE_SMP))
191		add_taint(TAINT_UNSAFE_SMP);
192
193valid_k7:
194	;
195}
196
197static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
198{
199	u32 l, h;
200
201	/*
202	 * Bit 15 of Athlon specific MSR 15, needs to be 0
203	 * to enable SSE on Palomino/Morgan/Barton CPU's.
204	 * If the BIOS didn't enable it already, enable it here.
205	 */
206	if (c->x86_model >= 6 && c->x86_model <= 10) {
207		if (!cpu_has(c, X86_FEATURE_XMM)) {
208			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
209			rdmsr(MSR_K7_HWCR, l, h);
210			l &= ~0x00008000;
211			wrmsr(MSR_K7_HWCR, l, h);
212			set_cpu_cap(c, X86_FEATURE_XMM);
213		}
214	}
215
216	/*
217	 * It's been determined by AMD that Athlons since model 8 stepping 1
218	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
219	 * As per AMD technical note 27212 0.2
220	 */
221	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
222		rdmsr(MSR_K7_CLK_CTL, l, h);
223		if ((l & 0xfff00000) != 0x20000000) {
224			printk(KERN_INFO
225			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
226					l, ((l & 0x000fffff)|0x20000000));
227			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
228		}
229	}
230
231	set_cpu_cap(c, X86_FEATURE_K7);
232
233	amd_k7_smp_check(c);
234}
235#endif
236
237#ifdef CONFIG_NUMA
238/*
239 * To workaround broken NUMA config.  Read the comment in
240 * srat_detect_node().
241 */
242static int __cpuinit nearby_node(int apicid)
243{
244	int i, node;
245
246	for (i = apicid - 1; i >= 0; i--) {
247		node = __apicid_to_node[i];
248		if (node != NUMA_NO_NODE && node_online(node))
249			return node;
250	}
251	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
252		node = __apicid_to_node[i];
253		if (node != NUMA_NO_NODE && node_online(node))
254			return node;
255	}
256	return first_node(node_online_map); /* Shouldn't happen */
257}
258#endif
259
260/*
261 * Fixup core topology information for
262 * (1) AMD multi-node processors
263 *     Assumption: Number of cores in each internal node is the same.
264 * (2) AMD processors supporting compute units
265 */
266#ifdef CONFIG_X86_HT
267static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
268{
269	u32 nodes, cores_per_cu = 1;
270	u8 node_id;
271	int cpu = smp_processor_id();
272
273	/* get information required for multi-node processors */
274	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
275		u32 eax, ebx, ecx, edx;
276
277		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
278		nodes = ((ecx >> 8) & 7) + 1;
279		node_id = ecx & 7;
280
281		/* get compute unit information */
282		smp_num_siblings = ((ebx >> 8) & 3) + 1;
283		c->compute_unit_id = ebx & 0xff;
284		cores_per_cu += ((ebx >> 8) & 3);
285	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
286		u64 value;
287
288		rdmsrl(MSR_FAM10H_NODE_ID, value);
289		nodes = ((value >> 3) & 7) + 1;
290		node_id = value & 7;
291	} else
292		return;
293
294	/* fixup multi-node processor information */
295	if (nodes > 1) {
296		u32 cores_per_node;
297		u32 cus_per_node;
298
299		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
300		cores_per_node = c->x86_max_cores / nodes;
301		cus_per_node = cores_per_node / cores_per_cu;
302
303		/* store NodeID, use llc_shared_map to store sibling info */
304		per_cpu(cpu_llc_id, cpu) = node_id;
305
306		/* core id has to be in the [0 .. cores_per_node - 1] range */
307		c->cpu_core_id %= cores_per_node;
308		c->compute_unit_id %= cus_per_node;
309	}
310}
311#endif
312
313/*
314 * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
315 * Assumes number of cores is a power of two.
316 */
317static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
318{
319#ifdef CONFIG_X86_HT
320	unsigned bits;
321	int cpu = smp_processor_id();
322
323	bits = c->x86_coreid_bits;
324	/* Low order bits define the core id (index of core in socket) */
325	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
326	/* Convert the initial APIC ID into the socket ID */
327	c->phys_proc_id = c->initial_apicid >> bits;
328	/* use socket ID also for last level cache */
329	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
330	amd_get_topology(c);
331#endif
332}
333
334int amd_get_nb_id(int cpu)
335{
336	int id = 0;
337#ifdef CONFIG_SMP
338	id = per_cpu(cpu_llc_id, cpu);
339#endif
340	return id;
341}
342EXPORT_SYMBOL_GPL(amd_get_nb_id);
343
344static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
345{
346#ifdef CONFIG_NUMA
347	int cpu = smp_processor_id();
348	int node;
349	unsigned apicid = c->apicid;
350
351	node = numa_cpu_node(cpu);
352	if (node == NUMA_NO_NODE)
353		node = per_cpu(cpu_llc_id, cpu);
354
355	/*
356	 * On multi-fabric platform (e.g. Numascale NumaChip) a
357	 * platform-specific handler needs to be called to fixup some
358	 * IDs of the CPU.
359	 */
360	if (x86_cpuinit.fixup_cpu_id)
361		x86_cpuinit.fixup_cpu_id(c, node);
362
363	if (!node_online(node)) {
364		/*
365		 * Two possibilities here:
366		 *
367		 * - The CPU is missing memory and no node was created.  In
368		 *   that case try picking one from a nearby CPU.
369		 *
370		 * - The APIC IDs differ from the HyperTransport node IDs
371		 *   which the K8 northbridge parsing fills in.  Assume
372		 *   they are all increased by a constant offset, but in
373		 *   the same order as the HT nodeids.  If that doesn't
374		 *   result in a usable node fall back to the path for the
375		 *   previous case.
376		 *
377		 * This workaround operates directly on the mapping between
378		 * APIC ID and NUMA node, assuming certain relationship
379		 * between APIC ID, HT node ID and NUMA topology.  As going
380		 * through CPU mapping may alter the outcome, directly
381		 * access __apicid_to_node[].
382		 */
383		int ht_nodeid = c->initial_apicid;
384
385		if (ht_nodeid >= 0 &&
386		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
387			node = __apicid_to_node[ht_nodeid];
388		/* Pick a nearby node */
389		if (!node_online(node))
390			node = nearby_node(apicid);
391	}
392	numa_set_node(cpu, node);
393#endif
394}
395
396static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
397{
398#ifdef CONFIG_X86_HT
399	unsigned bits, ecx;
400
401	/* Multi core CPU? */
402	if (c->extended_cpuid_level < 0x80000008)
403		return;
404
405	ecx = cpuid_ecx(0x80000008);
406
407	c->x86_max_cores = (ecx & 0xff) + 1;
408
409	/* CPU telling us the core id bits shift? */
410	bits = (ecx >> 12) & 0xF;
411
412	/* Otherwise recompute */
413	if (bits == 0) {
414		while ((1 << bits) < c->x86_max_cores)
415			bits++;
416	}
417
418	c->x86_coreid_bits = bits;
419#endif
420}
421
422static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
423{
424	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
425
426		if (c->x86 > 0x10 ||
427		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
428			u64 val;
429
430			rdmsrl(MSR_K7_HWCR, val);
431			if (!(val & BIT(24)))
432				printk(KERN_WARNING FW_BUG "TSC doesn't count "
433					"with P0 frequency!\n");
434		}
435	}
436
437	if (c->x86 == 0x15) {
438		unsigned long upperbit;
439		u32 cpuid, assoc;
440
441		cpuid	 = cpuid_edx(0x80000005);
442		assoc	 = cpuid >> 16 & 0xff;
443		upperbit = ((cpuid >> 24) << 10) / assoc;
444
445		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
446		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
447	}
448}
449
450static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
451{
452	early_init_amd_mc(c);
453
454	/*
 455	 * c->x86_power is 8000_0007 edx. Bit 8 means the TSC runs at a constant rate
 456	 * with P/T states and does not stop in deep C-states
457	 */
458	if (c->x86_power & (1 << 8)) {
459		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
460		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
461		if (!check_tsc_unstable())
462			sched_clock_stable = 1;
463	}
464
465#ifdef CONFIG_X86_64
466	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
467#else
468	/*  Set MTRR capability flag if appropriate */
469	if (c->x86 == 5)
470		if (c->x86_model == 13 || c->x86_model == 9 ||
471		    (c->x86_model == 8 && c->x86_mask >= 8))
472			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
473#endif
474#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
475	/* check CPU config space for extended APIC ID */
476	if (cpu_has_apic && c->x86 >= 0xf) {
477		unsigned int val;
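    		/* Bus 0, slot 0x18 (decimal 24), function 0, offset 0x68 is the
    		   K8 northbridge Link Transaction Control register; bits 17 and 18
    		   set together are taken here to mean extended APIC IDs are in use
    		   (assumed register layout). */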
478		val = read_pci_config(0, 24, 0, 0x68);
479		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
480			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
481	}
482#endif
483}
484
485static void __cpuinit init_amd(struct cpuinfo_x86 *c)
486{
487	u32 dummy;
488
489#ifdef CONFIG_SMP
490	unsigned long long value;
491
492	/*
493	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
494	 * bit 6 of msr C001_0015
495	 *
496	 * Errata 63 for SH-B3 steppings
497	 * Errata 122 for all steppings (F+ have it disabled by default)
498	 */
499	if (c->x86 == 0xf) {
500		rdmsrl(MSR_K7_HWCR, value);
501		value |= 1 << 6;
502		wrmsrl(MSR_K7_HWCR, value);
503	}
504#endif
505
506	early_init_amd(c);
507
508	/*
509	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
510	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
511	 */
512	clear_cpu_cap(c, 0*32+31);
513
514#ifdef CONFIG_X86_64
515	/* On C+ stepping K8 rep microcode works well for copy/memset */
516	if (c->x86 == 0xf) {
517		u32 level;
518
519		level = cpuid_eax(1);
520		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
521			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
522
523		/*
524		 * Some BIOSes incorrectly force this feature, but only K8
525		 * revision D (model = 0x14) and later actually support it.
526		 * (AMD Erratum #110, docId: 25759).
527		 */
528		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
529			u64 val;
530
531			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
532			if (!rdmsrl_amd_safe(0xc001100d, &val)) {
533				val &= ~(1ULL << 32);
534				wrmsrl_amd_safe(0xc001100d, val);
535			}
536		}
537
538	}
539	if (c->x86 >= 0x10)
540		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
541
542	/* get apicid instead of initial apic id from cpuid */
543	c->apicid = hard_smp_processor_id();
544#else
545
546	/*
547	 *	FIXME: We should handle the K5 here. Set up the write
548	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
549	 *	no bus pipeline)
550	 */
551
552	switch (c->x86) {
553	case 4:
554		init_amd_k5(c);
555		break;
556	case 5:
557		init_amd_k6(c);
558		break;
559	case 6: /* An Athlon/Duron */
560		init_amd_k7(c);
561		break;
562	}
563
 564	/* K6s report MCEs but don't actually have all the MSRs */
565	if (c->x86 < 6)
566		clear_cpu_cap(c, X86_FEATURE_MCE);
567#endif
568
569	/* Enable workaround for FXSAVE leak */
570	if (c->x86 >= 6)
571		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
572
573	if (!c->x86_model_id[0]) {
574		switch (c->x86) {
575		case 0xf:
 576			/* Should distinguish models here, but this is only
 577			   a fallback anyway. */
578			strcpy(c->x86_model_id, "Hammer");
579			break;
580		}
581	}
582
583	/* re-enable TopologyExtensions if switched off by BIOS */
584	if ((c->x86 == 0x15) &&
585	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
586	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
587		u64 val;
588
589		if (!rdmsrl_amd_safe(0xc0011005, &val)) {
590			val |= 1ULL << 54;
591			wrmsrl_amd_safe(0xc0011005, val);
592			rdmsrl(0xc0011005, val);
593			if (val & (1ULL << 54)) {
594				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
595				printk(KERN_INFO FW_INFO "CPU: Re-enabling "
596				  "disabled Topology Extensions Support\n");
597			}
598		}
599	}
600
601	cpu_detect_cache_sizes(c);
602
603	/* Multi core CPU? */
604	if (c->extended_cpuid_level >= 0x80000008) {
605		amd_detect_cmp(c);
606		srat_detect_node(c);
607	}
608
609#ifdef CONFIG_X86_32
610	detect_ht(c);
611#endif
612
613	if (c->extended_cpuid_level >= 0x80000006) {
614		if (cpuid_edx(0x80000006) & 0xf000)
615			num_cache_leaves = 4;
616		else
617			num_cache_leaves = 3;
618	}
619
620	if (c->x86 >= 0xf)
621		set_cpu_cap(c, X86_FEATURE_K8);
622
623	if (cpu_has_xmm2) {
624		/* MFENCE stops RDTSC speculation */
625		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
626	}
627
628#ifdef CONFIG_X86_64
629	if (c->x86 == 0x10) {
630		/* do this for boot cpu */
631		if (c == &boot_cpu_data)
632			check_enable_amd_mmconf_dmi();
633
634		fam10h_check_enable_mmcfg();
635	}
636
637	if (c == &boot_cpu_data && c->x86 >= 0xf) {
638		unsigned long long tseg;
639
640		/*
641		 * Split up direct mapping around the TSEG SMM area.
642		 * Don't do it for gbpages because there seems very little
643		 * benefit in doing so.
644		 */
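    		/* Split only when TSEG lies in a range currently covered by the
    		   direct mapping: below max_low_pfn_mapped, or below max_pfn_mapped
    		   while at or above 4 GB. */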
645		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
646			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
647			if ((tseg>>PMD_SHIFT) <
648				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
649				((tseg>>PMD_SHIFT) <
650				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
651				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
652				set_memory_4k((unsigned long)__va(tseg), 1);
653		}
654	}
655#endif
656
657	/*
 658	 * Family 0x12 and above processors have an APIC timer that keeps
 659	 * running in deep C states.
660	 */
661	if (c->x86 > 0x11)
662		set_cpu_cap(c, X86_FEATURE_ARAT);
663
664	/*
665	 * Disable GART TLB Walk Errors on Fam10h. We do this here
666	 * because this is always needed when GART is enabled, even in a
667	 * kernel which has no MCE support built in.
668	 */
669	if (c->x86 == 0x10) {
670		/*
 671		 * BIOS should disable GartTlbWlk Errors itself. If
 672		 * it doesn't, do it here as suggested by the BKDG.
673		 *
674		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
675		 */
676		u64 mask;
677		int err;
678
679		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
680		if (err == 0) {
681			mask |= (1 << 10);
682			checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
683		}
684	}
685
686	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
687}
688
689#ifdef CONFIG_X86_32
690static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
691							unsigned int size)
692{
693	/* AMD errata T13 (order #21922) */
 694	if (c->x86 == 6) {
695		/* Duron Rev A0 */
696		if (c->x86_model == 3 && c->x86_mask == 0)
697			size = 64;
698		/* Tbird rev A1/A2 */
699		if (c->x86_model == 4 &&
700			(c->x86_mask == 0 || c->x86_mask == 1))
701			size = 256;
702	}
703	return size;
704}
705#endif
706
707static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
708	.c_vendor	= "AMD",
709	.c_ident	= { "AuthenticAMD" },
710#ifdef CONFIG_X86_32
711	.c_models = {
712		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
713		  {
714			  [3] = "486 DX/2",
715			  [7] = "486 DX/2-WB",
716			  [8] = "486 DX/4",
717			  [9] = "486 DX/4-WB",
718			  [14] = "Am5x86-WT",
719			  [15] = "Am5x86-WB"
720		  }
721		},
722	},
723	.c_size_cache	= amd_size_cache,
724#endif
725	.c_early_init   = early_init_amd,
726	.c_bsp_init	= bsp_init_amd,
727	.c_init		= init_amd,
728	.c_x86_vendor	= X86_VENDOR_AMD,
729};
730
731cpu_dev_register(amd_cpu_dev);
732
733/*
734 * AMD errata checking
735 *
736 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
737 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
738 * have an OSVW id assigned, which it takes as first argument. Both take a
739 * variable number of family-specific model-stepping ranges created by
740 * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
741 * int[] in arch/x86/include/asm/processor.h.
742 *
743 * Example:
744 *
745 * const int amd_erratum_319[] =
746 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
747 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
748 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
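    *
    * Callers then test the array at runtime, e.g. (illustrative only):
    *
    *	if (cpu_has_amd_erratum(amd_erratum_400))
    *		... apply the workaround ...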
749 */
750
751const int amd_erratum_400[] =
752	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
753			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
754EXPORT_SYMBOL_GPL(amd_erratum_400);
755
756const int amd_erratum_383[] =
757	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
758EXPORT_SYMBOL_GPL(amd_erratum_383);
759
760bool cpu_has_amd_erratum(const int *erratum)
761{
762	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
763	int osvw_id = *erratum++;
764	u32 range;
765	u32 ms;
766
767	/*
768	 * If called early enough that current_cpu_data hasn't been initialized
769	 * yet, fall back to boot_cpu_data.
770	 */
771	if (cpu->x86 == 0)
772		cpu = &boot_cpu_data;
773
774	if (cpu->x86_vendor != X86_VENDOR_AMD)
775		return false;
776
777	if (osvw_id >= 0 && osvw_id < 65536 &&
778	    cpu_has(cpu, X86_FEATURE_OSVW)) {
779		u64 osvw_len;
780
781		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
782		if (osvw_id < osvw_len) {
783			u64 osvw_bits;
784
785			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
786			    osvw_bits);
787			return osvw_bits & (1ULL << (osvw_id & 0x3f));
788		}
789	}
790
791	/* OSVW unavailable or ID unknown, match family-model-stepping range */
792	ms = (cpu->x86_model << 4) | cpu->x86_mask;
793	while ((range = *erratum++))
794		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
795		    (ms >= AMD_MODEL_RANGE_START(range)) &&
796		    (ms <= AMD_MODEL_RANGE_END(range)))
797			return true;
798
799	return false;
800}
801
802EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
v6.9.4 (arch/x86/kernel/cpu/amd.c)
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/export.h>
   3#include <linux/bitops.h>
   4#include <linux/elf.h>
   5#include <linux/mm.h>
   6
   7#include <linux/io.h>
   8#include <linux/sched.h>
   9#include <linux/sched/clock.h>
  10#include <linux/random.h>
  11#include <linux/topology.h>
  12#include <asm/processor.h>
  13#include <asm/apic.h>
  14#include <asm/cacheinfo.h>
  15#include <asm/cpu.h>
  16#include <asm/spec-ctrl.h>
  17#include <asm/smp.h>
  18#include <asm/numa.h>
  19#include <asm/pci-direct.h>
  20#include <asm/delay.h>
  21#include <asm/debugreg.h>
  22#include <asm/resctrl.h>
  23#include <asm/sev.h>
  24
  25#ifdef CONFIG_X86_64
  26# include <asm/mmconfig.h>
  27#endif
  28
  29#include "cpu.h"
  30
  31static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
  32{
  33	u32 gprs[8] = { 0 };
  34	int err;
  35
  36	WARN_ONCE((boot_cpu_data.x86 != 0xf),
  37		  "%s should only be used on K8!\n", __func__);
  38
  39	gprs[1] = msr;
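     	/* gprs[7] is loaded into %edi by {rd,wr}msr_safe_regs(); the constant
     	   below is a vendor-specific key that these MSR accesses appear to
     	   require (assumption, not spelled out in this file). */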
  40	gprs[7] = 0x9c5a203a;
  41
  42	err = rdmsr_safe_regs(gprs);
  43
  44	*p = gprs[0] | ((u64)gprs[2] << 32);
  45
  46	return err;
  47}
  48
  49static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
  50{
  51	u32 gprs[8] = { 0 };
  52
  53	WARN_ONCE((boot_cpu_data.x86 != 0xf),
  54		  "%s should only be used on K8!\n", __func__);
  55
  56	gprs[0] = (u32)val;
  57	gprs[1] = msr;
  58	gprs[2] = val >> 32;
  59	gprs[7] = 0x9c5a203a;
  60
  61	return wrmsr_safe_regs(gprs);
  62}
  63
  64/*
  65 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
  66 *	misexecution of code under Linux. Owners of such processors should
  67 *	contact AMD for precise details and a CPU swap.
  68 *
  69 *	See	http://www.multimania.com/poulot/k6bug.html
  70 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
  71 *		(Publication # 21266  Issue Date: August 1998)
  72 *
  73 *	The following test is erm.. interesting. AMD neglected to up
  74 *	the chip setting when fixing the bug but they also tweaked some
  75 *	performance at the same time..
  76 */
  77
  78#ifdef CONFIG_X86_32
  79extern __visible void vide(void);
  80__asm__(".text\n"
  81	".globl vide\n"
  82	".type vide, @function\n"
  83	".align 4\n"
  84	"vide: ret\n");
  85#endif
  86
  87static void init_amd_k5(struct cpuinfo_x86 *c)
  88{
  89#ifdef CONFIG_X86_32
  90/*
  91 * General Systems BIOSen alias the cpu frequency registers
  92 * of the Elan at 0x000df000. Unfortunately, one of the Linux
  93 * drivers subsequently pokes it, and changes the CPU speed.
  94 * Workaround : Remove the unneeded alias.
  95 */
  96#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
  97#define CBAR_ENB	(0x80000000)
  98#define CBAR_KEY	(0X000000CB)
  99	if (c->x86_model == 9 || c->x86_model == 10) {
 100		if (inl(CBAR) & CBAR_ENB)
 101			outl(0 | CBAR_KEY, CBAR);
 102	}
 103#endif
 104}
 105
 106static void init_amd_k6(struct cpuinfo_x86 *c)
 107{
 108#ifdef CONFIG_X86_32
 109	u32 l, h;
 110	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
 111
 112	if (c->x86_model < 6) {
 113		/* Based on AMD doc 20734R - June 2000 */
 114		if (c->x86_model == 0) {
 115			clear_cpu_cap(c, X86_FEATURE_APIC);
 116			set_cpu_cap(c, X86_FEATURE_PGE);
 117		}
 118		return;
 119	}
 120
 121	if (c->x86_model == 6 && c->x86_stepping == 1) {
 122		const int K6_BUG_LOOP = 1000000;
 123		int n;
 124		void (*f_vide)(void);
 125		u64 d, d2;
 126
 127		pr_info("AMD K6 stepping B detected - ");
 128
 129		/*
 130		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
 131		 * calls at the same time.
 132		 */
 133
 134		n = K6_BUG_LOOP;
 135		f_vide = vide;
 136		OPTIMIZER_HIDE_VAR(f_vide);
 137		d = rdtsc();
 138		while (n--)
 139			f_vide();
 140		d2 = rdtsc();
 141		d = d2-d;
 142
 143		if (d > 20*K6_BUG_LOOP)
 144			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
 145		else
 146			pr_cont("probably OK (after B9730xxxx).\n");
 147	}
 148
 149	/* K6 with old style WHCR */
 150	if (c->x86_model < 8 ||
 151	   (c->x86_model == 8 && c->x86_stepping < 8)) {
 152		/* We can only write allocate on the low 508Mb */
 153		if (mbytes > 508)
 154			mbytes = 508;
 155
 156		rdmsr(MSR_K6_WHCR, l, h);
 157		if ((l&0x0000FFFF) == 0) {
 158			unsigned long flags;
 159			l = (1<<0)|((mbytes/4)<<1);
 160			local_irq_save(flags);
 161			wbinvd();
 162			wrmsr(MSR_K6_WHCR, l, h);
 163			local_irq_restore(flags);
 164			pr_info("Enabling old style K6 write allocation for %d Mb\n",
 165				mbytes);
 166		}
 167		return;
 168	}
 169
 170	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
 171	     c->x86_model == 9 || c->x86_model == 13) {
 172		/* The more serious chips .. */
 173
 174		if (mbytes > 4092)
 175			mbytes = 4092;
 176
 177		rdmsr(MSR_K6_WHCR, l, h);
 178		if ((l&0xFFFF0000) == 0) {
 179			unsigned long flags;
 180			l = ((mbytes>>2)<<22)|(1<<16);
 181			local_irq_save(flags);
 182			wbinvd();
 183			wrmsr(MSR_K6_WHCR, l, h);
 184			local_irq_restore(flags);
 185			pr_info("Enabling new style K6 write allocation for %d Mb\n",
 186				mbytes);
 187		}
 188
 189		return;
 190	}
 191
 192	if (c->x86_model == 10) {
 193		/* AMD Geode LX is model 10 */
 194		/* placeholder for any needed mods */
 195		return;
 196	}
 197#endif
 198}
 199
 200static void init_amd_k7(struct cpuinfo_x86 *c)
 201{
 202#ifdef CONFIG_X86_32
 203	u32 l, h;
 204
 205	/*
 206	 * Bit 15 of Athlon specific MSR 15, needs to be 0
 207	 * to enable SSE on Palomino/Morgan/Barton CPU's.
 208	 * If the BIOS didn't enable it already, enable it here.
 209	 */
 210	if (c->x86_model >= 6 && c->x86_model <= 10) {
 211		if (!cpu_has(c, X86_FEATURE_XMM)) {
 212			pr_info("Enabling disabled K7/SSE Support.\n");
 213			msr_clear_bit(MSR_K7_HWCR, 15);
 214			set_cpu_cap(c, X86_FEATURE_XMM);
 215		}
 216	}
 217
 218	/*
 219	 * It's been determined by AMD that Athlons since model 8 stepping 1
 220	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
 221	 * As per AMD technical note 27212 0.2
 222	 */
 223	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
 224		rdmsr(MSR_K7_CLK_CTL, l, h);
 225		if ((l & 0xfff00000) != 0x20000000) {
 226			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
 227				l, ((l & 0x000fffff)|0x20000000));
 228			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
 229		}
 230	}
 231
 232	/* calling is from identify_secondary_cpu() ? */
 233	if (!c->cpu_index)
 234		return;
 235
 236	/*
 237	 * Certain Athlons might work (for various values of 'work') in SMP
 238	 * but they are not certified as MP capable.
 239	 */
 240	/* Athlon 660/661 is valid. */
 241	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
 242	    (c->x86_stepping == 1)))
 243		return;
 244
 245	/* Duron 670 is valid */
 246	if ((c->x86_model == 7) && (c->x86_stepping == 0))
 247		return;
 248
 249	/*
 250	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
 251	 * bit. It's worth noting that the A5 stepping (662) of some
 252	 * Athlon XPs have the MP bit set.
 253	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
 254	 * more.
 255	 */
 256	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
 257	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
 258	     (c->x86_model > 7))
 259		if (cpu_has(c, X86_FEATURE_MP))
 260			return;
 261
 262	/* If we get here, not a certified SMP capable AMD system. */
 263
 264	/*
 265	 * Don't taint if we are running an SMP kernel on a single non-MP
 266	 * approved Athlon
 267	 */
 268	WARN_ONCE(1, "WARNING: This combination of AMD"
 269		" processors is not suitable for SMP.\n");
 270	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
 271#endif
 272}
 273
 274#ifdef CONFIG_NUMA
 275/*
 276 * To workaround broken NUMA config.  Read the comment in
 277 * srat_detect_node().
 278 */
 279static int nearby_node(int apicid)
 280{
 281	int i, node;
 282
 283	for (i = apicid - 1; i >= 0; i--) {
 284		node = __apicid_to_node[i];
 285		if (node != NUMA_NO_NODE && node_online(node))
 286			return node;
 287	}
 288	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
 289		node = __apicid_to_node[i];
 290		if (node != NUMA_NO_NODE && node_online(node))
 291			return node;
 292	}
 293	return first_node(node_online_map); /* Shouldn't happen */
 294}
 295#endif
 296
 297static void srat_detect_node(struct cpuinfo_x86 *c)
 298{
 299#ifdef CONFIG_NUMA
 300	int cpu = smp_processor_id();
 301	int node;
 302	unsigned apicid = c->topo.apicid;
 303
 304	node = numa_cpu_node(cpu);
 305	if (node == NUMA_NO_NODE)
 306		node = per_cpu_llc_id(cpu);
 307
 308	/*
 309	 * On multi-fabric platform (e.g. Numascale NumaChip) a
 310	 * platform-specific handler needs to be called to fixup some
 311	 * IDs of the CPU.
 312	 */
 313	if (x86_cpuinit.fixup_cpu_id)
 314		x86_cpuinit.fixup_cpu_id(c, node);
 315
 316	if (!node_online(node)) {
 317		/*
 318		 * Two possibilities here:
 319		 *
 320		 * - The CPU is missing memory and no node was created.  In
 321		 *   that case try picking one from a nearby CPU.
 322		 *
 323		 * - The APIC IDs differ from the HyperTransport node IDs
 324		 *   which the K8 northbridge parsing fills in.  Assume
 325		 *   they are all increased by a constant offset, but in
 326		 *   the same order as the HT nodeids.  If that doesn't
 327		 *   result in a usable node fall back to the path for the
 328		 *   previous case.
 329		 *
 330		 * This workaround operates directly on the mapping between
 331		 * APIC ID and NUMA node, assuming certain relationship
 332		 * between APIC ID, HT node ID and NUMA topology.  As going
 333		 * through CPU mapping may alter the outcome, directly
 334		 * access __apicid_to_node[].
 335		 */
 336		int ht_nodeid = c->topo.initial_apicid;
 337
 338		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
 339			node = __apicid_to_node[ht_nodeid];
 340		/* Pick a nearby node */
 341		if (!node_online(node))
 342			node = nearby_node(apicid);
 343	}
 344	numa_set_node(cpu, node);
 345#endif
 346}
 347
 348static void bsp_determine_snp(struct cpuinfo_x86 *c)
 349{
 350#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
 351	cc_vendor = CC_VENDOR_AMD;
 352
 353	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
 354		/*
 355		 * RMP table entry format is not architectural and is defined by the
 356		 * per-processor PPR. Restrict SNP support on the known CPU models
 357		 * for which the RMP table entry format is currently defined.
 358		 */
 359		if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
 360		    c->x86 >= 0x19 && snp_probe_rmptable_info()) {
 361			cc_platform_set(CC_ATTR_HOST_SEV_SNP);
 362		} else {
 363			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
 364			cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 365		}
 366	}
 367#endif
 368}
 369
 370static void bsp_init_amd(struct cpuinfo_x86 *c)
 371{
 372	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 373
 374		if (c->x86 > 0x10 ||
 375		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
 376			u64 val;
 377
 378			rdmsrl(MSR_K7_HWCR, val);
 379			if (!(val & BIT(24)))
 380				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
 381		}
 382	}
 383
 384	if (c->x86 == 0x15) {
 385		unsigned long upperbit;
 386		u32 cpuid, assoc;
 387
 388		cpuid	 = cpuid_edx(0x80000005);
 389		assoc	 = cpuid >> 16 & 0xff;
 390		upperbit = ((cpuid >> 24) << 10) / assoc;
 391
 392		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
 393		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
 394
 395		/* A random value per boot for bit slice [12:upper_bit) */
 396		va_align.bits = get_random_u32() & va_align.mask;
 397	}
 398
 399	if (cpu_has(c, X86_FEATURE_MWAITX))
 400		use_mwaitx_delay();
 401
 402	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
 403	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
 404	    c->x86 >= 0x15 && c->x86 <= 0x17) {
 405		unsigned int bit;
 406
 407		switch (c->x86) {
 408		case 0x15: bit = 54; break;
 409		case 0x16: bit = 33; break;
 410		case 0x17: bit = 10; break;
 411		default: return;
 412		}
 413		/*
 414		 * Try to cache the base value so further operations can
 415		 * avoid RMW. If that faults, do not enable SSBD.
 416		 */
 417		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
 418			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
 419			setup_force_cpu_cap(X86_FEATURE_SSBD);
 420			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
 421		}
 422	}
 423
 424	resctrl_cpu_detect(c);
 425
 426	/* Figure out Zen generations: */
 427	switch (c->x86) {
 428	case 0x17:
 429		switch (c->x86_model) {
 430		case 0x00 ... 0x2f:
 431		case 0x50 ... 0x5f:
 432			setup_force_cpu_cap(X86_FEATURE_ZEN1);
 433			break;
 434		case 0x30 ... 0x4f:
 435		case 0x60 ... 0x7f:
 436		case 0x90 ... 0x91:
 437		case 0xa0 ... 0xaf:
 438			setup_force_cpu_cap(X86_FEATURE_ZEN2);
 439			break;
 440		default:
 441			goto warn;
 442		}
 443		break;
 444
 445	case 0x19:
 446		switch (c->x86_model) {
 447		case 0x00 ... 0x0f:
 448		case 0x20 ... 0x5f:
 449			setup_force_cpu_cap(X86_FEATURE_ZEN3);
 450			break;
 451		case 0x10 ... 0x1f:
 452		case 0x60 ... 0xaf:
 453			setup_force_cpu_cap(X86_FEATURE_ZEN4);
 454			break;
 455		default:
 456			goto warn;
 457		}
 458		break;
 459
 460	case 0x1a:
 461		switch (c->x86_model) {
 462		case 0x00 ... 0x2f:
 463		case 0x40 ... 0x4f:
 464		case 0x70 ... 0x7f:
 465			setup_force_cpu_cap(X86_FEATURE_ZEN5);
 466			break;
 467		default:
 468			goto warn;
 469		}
 470		break;
 471
 472	default:
 473		break;
 474	}
 475
 476	bsp_determine_snp(c);
 477	return;
 478
 479warn:
 480	WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
 481}
 482
 483static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 484{
 485	u64 msr;
 486
 487	/*
 488	 * BIOS support is required for SME and SEV.
 489	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
 490	 *	      the SME physical address space reduction value.
 491	 *	      If BIOS has not enabled SME then don't advertise the
 492	 *	      SME feature (set in scattered.c).
 493	 *	      If the kernel has not enabled SME via any means then
 494	 *	      don't advertise the SME feature.
 495	 *   For SEV: If BIOS has not enabled SEV then don't advertise SEV and
 496	 *	      any additional functionality based on it.
 497	 *
 498	 *   In all cases, since support for SME and SEV requires long mode,
 499	 *   don't advertise the feature under CONFIG_X86_32.
 500	 */
 501	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
 502		/* Check if memory encryption is enabled */
 503		rdmsrl(MSR_AMD64_SYSCFG, msr);
 504		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
 505			goto clear_all;
 506
 507		/*
 508		 * Always adjust physical address bits. Even though this
 509		 * will be a value above 32-bits this is still done for
 510		 * CONFIG_X86_32 so that accurate values are reported.
 511		 */
 512		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
 513
 514		if (IS_ENABLED(CONFIG_X86_32))
 515			goto clear_all;
 516
 517		if (!sme_me_mask)
 518			setup_clear_cpu_cap(X86_FEATURE_SME);
 519
 520		rdmsrl(MSR_K7_HWCR, msr);
 521		if (!(msr & MSR_K7_HWCR_SMMLOCK))
 522			goto clear_sev;
 523
 524		return;
 525
 526clear_all:
 527		setup_clear_cpu_cap(X86_FEATURE_SME);
 528clear_sev:
 529		setup_clear_cpu_cap(X86_FEATURE_SEV);
 530		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
 531		setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
 532	}
 533}
 534
 535static void early_init_amd(struct cpuinfo_x86 *c)
 536{
 537	u32 dummy;
 538
 539	if (c->x86 >= 0xf)
 540		set_cpu_cap(c, X86_FEATURE_K8);
 541
 542	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 543
 544	/*
 545	 * c->x86_power is 8000_0007 edx. Bit 8 means the TSC runs at a constant rate
 546	 * with P/T states and does not stop in deep C-states
 547	 */
 548	if (c->x86_power & (1 << 8)) {
 549		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 550		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 551	}
 552
 553	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
 554	if (c->x86_power & BIT(12))
 555		set_cpu_cap(c, X86_FEATURE_ACC_POWER);
 556
 557	/* Bit 14 indicates the Runtime Average Power Limit interface. */
 558	if (c->x86_power & BIT(14))
 559		set_cpu_cap(c, X86_FEATURE_RAPL);
 560
 561#ifdef CONFIG_X86_64
 562	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
 563#else
 564	/*  Set MTRR capability flag if appropriate */
 565	if (c->x86 == 5)
 566		if (c->x86_model == 13 || c->x86_model == 9 ||
 567		    (c->x86_model == 8 && c->x86_stepping >= 8))
 568			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 569#endif
 570#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
 571	/*
 572	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
 573	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
 574	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
 575	 * after 16h.
 576	 */
 577	if (boot_cpu_has(X86_FEATURE_APIC)) {
 578		if (c->x86 > 0x16)
 579			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 580		else if (c->x86 >= 0xf) {
 581			/* check CPU config space for extended APIC ID */
 582			unsigned int val;
 583
 584			val = read_pci_config(0, 24, 0, 0x68);
 585			if ((val >> 17 & 0x3) == 0x3)
 586				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 587		}
 588	}
 589#endif
 590
 591	/*
 592	 * This is only needed to tell the kernel whether to use VMCALL
 593	 * and VMMCALL.  VMMCALL is never executed except under virt, so
 594	 * we can set it unconditionally.
 595	 */
 596	set_cpu_cap(c, X86_FEATURE_VMMCALL);
 597
 598	/* F16h erratum 793, CVE-2013-6885 */
 599	if (c->x86 == 0x16 && c->x86_model <= 0xf)
 600		msr_set_bit(MSR_AMD64_LS_CFG, 15);
 601
 602	early_detect_mem_encrypt(c);
 603
 604	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
 605		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
 606			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
 607		else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
 608			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
 609			setup_force_cpu_cap(X86_FEATURE_SBPB);
 610		}
 611	}
 612}
 613
 614static void init_amd_k8(struct cpuinfo_x86 *c)
 615{
 616	u32 level;
 617	u64 value;
 618
 619	/* On C+ stepping K8 rep microcode works well for copy/memset */
 620	level = cpuid_eax(1);
 621	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 622		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 623
 624	/*
 625	 * Some BIOSes incorrectly force this feature, but only K8 revision D
 626	 * (model = 0x14) and later actually support it.
 627	 * (AMD Erratum #110, docId: 25759).
 628	 */
 629	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
 630		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
 631		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
 632			value &= ~BIT_64(32);
 633			wrmsrl_amd_safe(0xc001100d, value);
 634		}
 635	}
 636
 637	if (!c->x86_model_id[0])
 638		strcpy(c->x86_model_id, "Hammer");
 639
 640#ifdef CONFIG_SMP
 641	/*
 642	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 643	 * bit 6 of msr C001_0015
 644	 *
 645	 * Errata 63 for SH-B3 steppings
 646	 * Errata 122 for all steppings (F+ have it disabled by default)
 647	 */
 648	msr_set_bit(MSR_K7_HWCR, 6);
 649#endif
 650	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
 651
 652	/*
 653	 * Check models and steppings affected by erratum 400. This is
 654	 * used to select the proper idle routine and to enable the
 655	 * check whether the machine is affected in arch_post_acpi_subsys_init()
 656	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
 657	 */
 658	if (c->x86_model > 0x41 ||
 659	    (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
 660		setup_force_cpu_bug(X86_BUG_AMD_E400);
 661}
 662
 663static void init_amd_gh(struct cpuinfo_x86 *c)
 664{
 665#ifdef CONFIG_MMCONF_FAM10H
 666	/* do this for boot cpu */
 667	if (c == &boot_cpu_data)
 668		check_enable_amd_mmconf_dmi();
 669
 670	fam10h_check_enable_mmcfg();
 671#endif
 672
 673	/*
 674	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
 675	 * is always needed when GART is enabled, even in a kernel which has no
 676	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
 677	 * If it doesn't, we do it here as suggested by the BKDG.
 678	 *
 679	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
 680	 */
 681	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
 682
 683	/*
 684	 * On family 10h BIOS may not have properly enabled WC+ support, causing
 685	 * it to be converted to CD memtype. This may result in performance
 686	 * degradation for certain nested-paging guests. Prevent this conversion
 687	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
 688	 *
 689	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
 690	 * guests on older kvm hosts.
 691	 */
 692	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
 693
 694	set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 695
 696	/*
 697	 * Check models and steppings affected by erratum 400. This is
 698	 * used to select the proper idle routine and to enable the
 699	 * check whether the machine is affected in arch_post_acpi_subsys_init()
 700	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
 701	 */
 702	if (c->x86_model > 0x2 ||
 703	    (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
 704		setup_force_cpu_bug(X86_BUG_AMD_E400);
 705}
 706
 707static void init_amd_ln(struct cpuinfo_x86 *c)
 708{
 709	/*
 710	 * Apply erratum 665 fix unconditionally so machines without a BIOS
 711	 * fix work.
 712	 */
 713	msr_set_bit(MSR_AMD64_DE_CFG, 31);
 714}
 715
 716static bool rdrand_force;
 717
 718static int __init rdrand_cmdline(char *str)
 719{
 720	if (!str)
 721		return -EINVAL;
 722
 723	if (!strcmp(str, "force"))
 724		rdrand_force = true;
 725	else
 726		return -EINVAL;
 727
 728	return 0;
 729}
 730early_param("rdrand", rdrand_cmdline);
 731
 732static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
 733{
 734	/*
 735	 * Saving of the MSR used to hide the RDRAND support during
 736	 * suspend/resume is done by arch/x86/power/cpu.c, which is
 737	 * dependent on CONFIG_PM_SLEEP.
 738	 */
 739	if (!IS_ENABLED(CONFIG_PM_SLEEP))
 740		return;
 741
 742	/*
 743	 * The self-test can clear X86_FEATURE_RDRAND, so check for
 744	 * RDRAND support using the CPUID function directly.
 745	 */
 746	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
 747		return;
 748
 749	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
 750
 751	/*
 752	 * Verify that the CPUID change has occurred in case the kernel is
 753	 * running virtualized and the hypervisor doesn't support the MSR.
 754	 */
 755	if (cpuid_ecx(1) & BIT(30)) {
 756		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
 757		return;
 758	}
 759
 760	clear_cpu_cap(c, X86_FEATURE_RDRAND);
 761	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
 762}
 763
 764static void init_amd_jg(struct cpuinfo_x86 *c)
 765{
 766	/*
 767	 * Some BIOS implementations do not restore proper RDRAND support
 768	 * across suspend and resume. Check on whether to hide the RDRAND
 769	 * instruction support via CPUID.
 770	 */
 771	clear_rdrand_cpuid_bit(c);
 772}
 773
 774static void init_amd_bd(struct cpuinfo_x86 *c)
 775{
 776	u64 value;
 777
 778	/*
 779	 * The "way access filter" has a performance penalty on some workloads.
 780	 * Disable it on the affected CPUs.
 781	 */
 782	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
 783		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
 784			value |= 0x1E;
 785			wrmsrl_safe(MSR_F15H_IC_CFG, value);
 786		}
 787	}
 788
 789	/*
 790	 * Some BIOS implementations do not restore proper RDRAND support
 791	 * across suspend and resume. Check on whether to hide the RDRAND
 792	 * instruction support via CPUID.
 793	 */
 794	clear_rdrand_cpuid_bit(c);
 795}
 796
 797static void fix_erratum_1386(struct cpuinfo_x86 *c)
 798{
 799	/*
 800	 * Work around Erratum 1386.  The XSAVES instruction malfunctions in
 801	 * certain circumstances on Zen1/2 uarch, and not all parts have had
 802	 * updated microcode at the time of writing (March 2023).
 803	 *
 804	 * Affected parts all have no supervisor XSAVE states, meaning that
 805	 * the XSAVEC instruction (which works fine) is equivalent.
 806	 */
 807	clear_cpu_cap(c, X86_FEATURE_XSAVES);
 808}
 809
 810void init_spectral_chicken(struct cpuinfo_x86 *c)
 811{
 812#ifdef CONFIG_MITIGATION_UNRET_ENTRY
 813	u64 value;
 814
 815	/*
 816	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
 817	 *
 818	 * This suppresses speculation from the middle of a basic block, i.e. it
 819	 * suppresses non-branch predictions.
 820	 */
 821	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
 822		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
 823			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
 824			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
 825		}
 826	}
 827#endif
 828}
 829
 830static void init_amd_zen_common(void)
 831{
 832	setup_force_cpu_cap(X86_FEATURE_ZEN);
 833#ifdef CONFIG_NUMA
 834	node_reclaim_distance = 32;
 835#endif
 836}
 837
 838static void init_amd_zen1(struct cpuinfo_x86 *c)
 839{
 840	fix_erratum_1386(c);
 841
 842	/* Fix up CPUID bits, but only if not virtualised. */
 843	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
 844
 845		/* Erratum 1076: CPB feature bit not being set in CPUID. */
 846		if (!cpu_has(c, X86_FEATURE_CPB))
 847			set_cpu_cap(c, X86_FEATURE_CPB);
 848	}
 849
 850	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
 851	setup_force_cpu_bug(X86_BUG_DIV0);
 852}
 853
 854static bool cpu_has_zenbleed_microcode(void)
 855{
 856	u32 good_rev = 0;
 857
 858	switch (boot_cpu_data.x86_model) {
 859	case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
 860	case 0x60 ... 0x67: good_rev = 0x0860010c; break;
 861	case 0x68 ... 0x6f: good_rev = 0x08608107; break;
 862	case 0x70 ... 0x7f: good_rev = 0x08701033; break;
 863	case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;
 864
 865	default:
 866		return false;
 867	}
 868
 869	if (boot_cpu_data.microcode < good_rev)
 870		return false;
 871
 872	return true;
 873}
 874
 875static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
 876{
 877	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
 878		return;
 879
 880	if (!cpu_has(c, X86_FEATURE_AVX))
 881		return;
 882
 883	if (!cpu_has_zenbleed_microcode()) {
 884		pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
 885		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
 886	} else {
 887		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
 888	}
 889}
 890
 891static void init_amd_zen2(struct cpuinfo_x86 *c)
 892{
 893	init_spectral_chicken(c);
 894	fix_erratum_1386(c);
 895	zen2_zenbleed_check(c);
 896}
 897
 898static void init_amd_zen3(struct cpuinfo_x86 *c)
 899{
 900	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
 901		/*
 902		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
 903		 * Branch Type Confusion, but predate the allocation of the
 904		 * BTC_NO bit.
 905		 */
 906		if (!cpu_has(c, X86_FEATURE_BTC_NO))
 907			set_cpu_cap(c, X86_FEATURE_BTC_NO);
 908	}
 909}
 910
 911static void init_amd_zen4(struct cpuinfo_x86 *c)
 912{
 913	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
 914		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
 915}
 916
 917static void init_amd_zen5(struct cpuinfo_x86 *c)
 918{
 919}
 920
 921static void init_amd(struct cpuinfo_x86 *c)
 922{
 923	u64 vm_cr;
 924
 925	early_init_amd(c);
 926
 927	/*
 928	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
 929	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
 930	 */
 931	clear_cpu_cap(c, 0*32+31);
 932
 933	if (c->x86 >= 0x10)
 934		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 935
 936	/* AMD FSRM also implies FSRS */
 937	if (cpu_has(c, X86_FEATURE_FSRM))
 938		set_cpu_cap(c, X86_FEATURE_FSRS);
 939
 940	/* K6s report MCEs but don't actually have all the MSRs */
 941	if (c->x86 < 6)
 942		clear_cpu_cap(c, X86_FEATURE_MCE);
 943
 944	switch (c->x86) {
 945	case 4:    init_amd_k5(c); break;
 946	case 5:    init_amd_k6(c); break;
 947	case 6:	   init_amd_k7(c); break;
 948	case 0xf:  init_amd_k8(c); break;
 949	case 0x10: init_amd_gh(c); break;
 950	case 0x12: init_amd_ln(c); break;
 951	case 0x15: init_amd_bd(c); break;
 952	case 0x16: init_amd_jg(c); break;
 953	}
 954
 955	/*
 956	 * Save up on some future enablement work and do common Zen
 957	 * settings.
 958	 */
 959	if (c->x86 >= 0x17)
 960		init_amd_zen_common();
 961
 962	if (boot_cpu_has(X86_FEATURE_ZEN1))
 963		init_amd_zen1(c);
 964	else if (boot_cpu_has(X86_FEATURE_ZEN2))
 965		init_amd_zen2(c);
 966	else if (boot_cpu_has(X86_FEATURE_ZEN3))
 967		init_amd_zen3(c);
 968	else if (boot_cpu_has(X86_FEATURE_ZEN4))
 969		init_amd_zen4(c);
 970	else if (boot_cpu_has(X86_FEATURE_ZEN5))
 971		init_amd_zen5(c);
 972
 973	/*
 974	 * Enable workaround for FXSAVE leak on CPUs
 975	 * without a XSaveErPtr feature
 976	 */
 977	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
 978		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 979
 980	cpu_detect_cache_sizes(c);
 981
 982	srat_detect_node(c);
 983
 984	init_amd_cacheinfo(c);
 985
 986	if (cpu_has(c, X86_FEATURE_SVM)) {
 987		rdmsrl(MSR_VM_CR, vm_cr);
 988		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
 989			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
 990			clear_cpu_cap(c, X86_FEATURE_SVM);
 991		}
 992	}
 993
 994	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
 995		/*
 996		 * Use LFENCE for execution serialization.  On families which
 997		 * don't have that MSR, LFENCE is already serializing.
 998		 * msr_set_bit() uses the safe accessors, too, even if the MSR
 999		 * is not present.
1000		 */
1001		msr_set_bit(MSR_AMD64_DE_CFG,
1002			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
1003
1004		/* A serializing LFENCE stops RDTSC speculation */
1005		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
1006	}
1007
1008	/*
 1009	 * Family 0x12 and above processors have an APIC timer that keeps
 1010	 * running in deep C states.
1011	 */
1012	if (c->x86 > 0x11)
1013		set_cpu_cap(c, X86_FEATURE_ARAT);
1014
1015	/* 3DNow or LM implies PREFETCHW */
1016	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
1017		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
1018			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
1019
1020	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
1021	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
1022		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
1023
1024	/*
1025	 * Turn on the Instructions Retired free counter on machines not
1026	 * susceptible to erratum #1054 "Instructions Retired Performance
1027	 * Counter May Be Inaccurate".
1028	 */
1029	if (cpu_has(c, X86_FEATURE_IRPERF) &&
1030	    (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
1031		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
1032
1033	check_null_seg_clears_base(c);
1034
1035	/*
1036	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
1037	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
1038	 * order to be replicated onto them. Regardless, set it here again, if not set,
1039	 * to protect against any future refactoring/code reorganization which might
1040	 * miss setting this important bit.
1041	 */
1042	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1043	    cpu_has(c, X86_FEATURE_AUTOIBRS))
1044		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
1045
1046	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
1047	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
1048}
1049
1050#ifdef CONFIG_X86_32
1051static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
1052{
1053	/* AMD errata T13 (order #21922) */
1054	if (c->x86 == 6) {
1055		/* Duron Rev A0 */
1056		if (c->x86_model == 3 && c->x86_stepping == 0)
1057			size = 64;
1058		/* Tbird rev A1/A2 */
1059		if (c->x86_model == 4 &&
1060			(c->x86_stepping == 0 || c->x86_stepping == 1))
1061			size = 256;
1062	}
1063	return size;
1064}
1065#endif
1066
1067static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
1068{
1069	u32 ebx, eax, ecx, edx;
1070	u16 mask = 0xfff;
1071
1072	if (c->x86 < 0xf)
1073		return;
1074
1075	if (c->extended_cpuid_level < 0x80000006)
1076		return;
1077
1078	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
1079
1080	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
1081	tlb_lli_4k[ENTRIES] = ebx & mask;
1082
1083	/*
1084	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
1085	 * characteristics from the CPUID function 0x80000005 instead.
1086	 */
1087	if (c->x86 == 0xf) {
1088		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1089		mask = 0xff;
1090	}
1091
1092	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1093	if (!((eax >> 16) & mask))
1094		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
1095	else
1096		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
1097
1098	/* a 4M entry uses two 2M entries */
1099	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
1100
1101	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1102	if (!(eax & mask)) {
1103		/* Erratum 658 */
1104		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
1105			tlb_lli_2m[ENTRIES] = 1024;
1106		} else {
1107			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1108			tlb_lli_2m[ENTRIES] = eax & 0xff;
1109		}
1110	} else
1111		tlb_lli_2m[ENTRIES] = eax & mask;
1112
1113	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
1114}
1115
1116static const struct cpu_dev amd_cpu_dev = {
1117	.c_vendor	= "AMD",
1118	.c_ident	= { "AuthenticAMD" },
1119#ifdef CONFIG_X86_32
1120	.legacy_models = {
1121		{ .family = 4, .model_names =
1122		  {
1123			  [3] = "486 DX/2",
1124			  [7] = "486 DX/2-WB",
1125			  [8] = "486 DX/4",
1126			  [9] = "486 DX/4-WB",
1127			  [14] = "Am5x86-WT",
1128			  [15] = "Am5x86-WB"
1129		  }
1130		},
1131	},
1132	.legacy_cache_size = amd_size_cache,
1133#endif
1134	.c_early_init   = early_init_amd,
1135	.c_detect_tlb	= cpu_detect_tlb_amd,
1136	.c_bsp_init	= bsp_init_amd,
1137	.c_init		= init_amd,
1138	.c_x86_vendor	= X86_VENDOR_AMD,
1139};
1140
1141cpu_dev_register(amd_cpu_dev);
1142
1143static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);
1144
1145static unsigned int amd_msr_dr_addr_masks[] = {
1146	MSR_F16H_DR0_ADDR_MASK,
1147	MSR_F16H_DR1_ADDR_MASK,
1148	MSR_F16H_DR1_ADDR_MASK + 1,
1149	MSR_F16H_DR1_ADDR_MASK + 2
1150};
1151
1152void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
1153{
1154	int cpu = smp_processor_id();
1155
1156	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
1157		return;
1158
1159	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
1160		return;
1161
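     	/* Skip the MSR write when the cached per-CPU mask already matches. */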
1162	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
1163		return;
1164
1165	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
1166	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
1167}
1168
1169unsigned long amd_get_dr_addr_mask(unsigned int dr)
1170{
1171	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
1172		return 0;
1173
1174	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
1175		return 0;
1176
1177	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
1178}
1179EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);
1180
1181u32 amd_get_highest_perf(void)
1182{
1183	struct cpuinfo_x86 *c = &boot_cpu_data;
1184
1185	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
1186			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
1187		return 166;
1188
1189	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
1190			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
1191		return 166;
1192
1193	return 255;
1194}
1195EXPORT_SYMBOL_GPL(amd_get_highest_perf);
1196
1197static void zenbleed_check_cpu(void *unused)
1198{
1199	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
1200
1201	zen2_zenbleed_check(c);
1202}
1203
1204void amd_check_microcode(void)
1205{
1206	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
1207		return;
1208
1209	on_each_cpu(zenbleed_check_cpu, NULL, 1);
1210}
1211
1212/*
1213 * Issue a DIV 0/1 insn to clear any division data from previous DIV
1214 * operations.
1215 */
1216void noinstr amd_clear_divider(void)
1217{
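     	/* Divide EDX:EAX = 0 by 1: a harmless DIV that overwrites any stale
     	   divider state; the ALTERNATIVE patches the insn in only on parts
     	   flagged with X86_BUG_DIV0. */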
1218	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
1219		     :: "a" (0), "d" (0), "r" (1));
1220}
1221EXPORT_SYMBOL_GPL(amd_clear_divider);