v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/export.h>
   3#include <linux/bitops.h>
   4#include <linux/elf.h>
   5#include <linux/mm.h>
   6
   7#include <linux/io.h>
   8#include <linux/sched.h>
   9#include <linux/sched/clock.h>
  10#include <linux/random.h>
  11#include <linux/topology.h>
  12#include <asm/processor.h>
  13#include <asm/apic.h>
  14#include <asm/cacheinfo.h>
  15#include <asm/cpu.h>
  16#include <asm/spec-ctrl.h>
  17#include <asm/smp.h>
  18#include <asm/numa.h>
  19#include <asm/pci-direct.h>
  20#include <asm/delay.h>
  21#include <asm/debugreg.h>
  22#include <asm/resctrl.h>
  23
  24#ifdef CONFIG_X86_64
  25# include <asm/mmconfig.h>
  26# include <asm/set_memory.h>
  27#endif
  28
  29#include "cpu.h"
  30
  31static const int amd_erratum_383[];
  32static const int amd_erratum_400[];
  33static const int amd_erratum_1054[];
  34static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
  35
  36/*
  37 * nodes_per_socket: Stores the number of nodes per socket.
  38 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
  39 * Node Identifiers[10:8]
  40 */
  41static u32 nodes_per_socket = 1;
  42
  43static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
  44{
  45	u32 gprs[8] = { 0 };
  46	int err;
  47
  48	WARN_ONCE((boot_cpu_data.x86 != 0xf),
  49		  "%s should only be used on K8!\n", __func__);
  50
  51	gprs[1] = msr;
  52	gprs[7] = 0x9c5a203a;
  53
  54	err = rdmsr_safe_regs(gprs);
  55
  56	*p = gprs[0] | ((u64)gprs[2] << 32);
  57
  58	return err;
  59}
  60
  61static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
  62{
  63	u32 gprs[8] = { 0 };
  64
  65	WARN_ONCE((boot_cpu_data.x86 != 0xf),
  66		  "%s should only be used on K8!\n", __func__);
  67
  68	gprs[0] = (u32)val;
  69	gprs[1] = msr;
  70	gprs[2] = val >> 32;
  71	gprs[7] = 0x9c5a203a;
  72
  73	return wrmsr_safe_regs(gprs);
  74}
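/*
 * Annotation (added for this listing, not in the kernel source):
 * rdmsr_safe_regs() and wrmsr_safe_regs() take an image of the eight
 * 32-bit GPRs; per arch/x86/lib/msr-reg.S the slots map to EAX, ECX,
 * EDX, EBX, ESP, EBP, ESI, EDI in that order.  So gprs[1] selects the
 * MSR via ECX, gprs[0]/gprs[2] carry the EDX:EAX value, and gprs[7]
 * loads EDI with 0x9c5a203a, the vendor passcode K8 parts expect
 * before honoring accesses to these model-specific MSRs.
 */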
  75
  76/*
   77 *	B-step AMD K6 CPUs before B 9730xxxx have hardware bugs that can cause
  78 *	misexecution of code under Linux. Owners of such processors should
  79 *	contact AMD for precise details and a CPU swap.
  80 *
  81 *	See	http://www.multimania.com/poulot/k6bug.html
  82 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
  83 *		(Publication # 21266  Issue Date: August 1998)
  84 *
  85 *	The following test is erm.. interesting. AMD neglected to up
  86 *	the chip setting when fixing the bug but they also tweaked some
  87 *	performance at the same time..
  88 */
  89
  90#ifdef CONFIG_X86_32
  91extern __visible void vide(void);
  92__asm__(".text\n"
  93	".globl vide\n"
  94	".type vide, @function\n"
  95	".align 4\n"
  96	"vide: ret\n");
  97#endif
  98
  99static void init_amd_k5(struct cpuinfo_x86 *c)
 100{
 101#ifdef CONFIG_X86_32
 102/*
 103 * General Systems BIOSen alias the cpu frequency registers
 104 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 105 * drivers subsequently pokes it, and changes the CPU speed.
 106 * Workaround : Remove the unneeded alias.
 107 */
 108#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
 109#define CBAR_ENB	(0x80000000)
 110#define CBAR_KEY	(0X000000CB)
 111	if (c->x86_model == 9 || c->x86_model == 10) {
 112		if (inl(CBAR) & CBAR_ENB)
 113			outl(0 | CBAR_KEY, CBAR);
 114	}
 115#endif
 116}
 117
 118static void init_amd_k6(struct cpuinfo_x86 *c)
 119{
 120#ifdef CONFIG_X86_32
 121	u32 l, h;
 122	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
 123
 124	if (c->x86_model < 6) {
 125		/* Based on AMD doc 20734R - June 2000 */
 126		if (c->x86_model == 0) {
 127			clear_cpu_cap(c, X86_FEATURE_APIC);
 128			set_cpu_cap(c, X86_FEATURE_PGE);
 129		}
 130		return;
 131	}
 132
 133	if (c->x86_model == 6 && c->x86_stepping == 1) {
 134		const int K6_BUG_LOOP = 1000000;
 135		int n;
 136		void (*f_vide)(void);
 137		u64 d, d2;
 138
 139		pr_info("AMD K6 stepping B detected - ");
 140
 141		/*
 142		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
 143		 * calls at the same time.
 144		 */
 145
 146		n = K6_BUG_LOOP;
 147		f_vide = vide;
 148		OPTIMIZER_HIDE_VAR(f_vide);
 149		d = rdtsc();
 150		while (n--)
 151			f_vide();
 152		d2 = rdtsc();
 153		d = d2-d;
 154
 155		if (d > 20*K6_BUG_LOOP)
 156			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
 157		else
 158			pr_cont("probably OK (after B9730xxxx).\n");
 159	}
 160
 161	/* K6 with old style WHCR */
 162	if (c->x86_model < 8 ||
 163	   (c->x86_model == 8 && c->x86_stepping < 8)) {
 164		/* We can only write allocate on the low 508Mb */
 165		if (mbytes > 508)
 166			mbytes = 508;
 167
 168		rdmsr(MSR_K6_WHCR, l, h);
 169		if ((l&0x0000FFFF) == 0) {
 170			unsigned long flags;
 171			l = (1<<0)|((mbytes/4)<<1);
 172			local_irq_save(flags);
 173			wbinvd();
 174			wrmsr(MSR_K6_WHCR, l, h);
 175			local_irq_restore(flags);
 176			pr_info("Enabling old style K6 write allocation for %d Mb\n",
 177				mbytes);
 178		}
 179		return;
 180	}
 181
 182	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
 183	     c->x86_model == 9 || c->x86_model == 13) {
 184		/* The more serious chips .. */
 185
 186		if (mbytes > 4092)
 187			mbytes = 4092;
 188
 189		rdmsr(MSR_K6_WHCR, l, h);
 190		if ((l&0xFFFF0000) == 0) {
 191			unsigned long flags;
 192			l = ((mbytes>>2)<<22)|(1<<16);
 193			local_irq_save(flags);
 194			wbinvd();
 195			wrmsr(MSR_K6_WHCR, l, h);
 196			local_irq_restore(flags);
 197			pr_info("Enabling new style K6 write allocation for %d Mb\n",
 198				mbytes);
 199		}
 200
 201		return;
 202	}
 203
 204	if (c->x86_model == 10) {
 205		/* AMD Geode LX is model 10 */
 206		/* placeholder for any needed mods */
 207		return;
 208	}
 209#endif
 210}
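/*
 * Annotation (added for this listing): a worked example of the WHCR
 * arithmetic above.  With the old-style layout and mbytes capped at
 * 508, l = (1 << 0) | ((508 / 4) << 1) = 1 | (127 << 1) = 0xff, i.e.
 * the write-allocate enable bit plus the size in 4 MB units.  The
 * new-style layout instead packs the size into bits 31:22 and the
 * enable bit into bit 16.
 */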
 211
 212static void init_amd_k7(struct cpuinfo_x86 *c)
 213{
 214#ifdef CONFIG_X86_32
 215	u32 l, h;
 216
 217	/*
 218	 * Bit 15 of Athlon specific MSR 15, needs to be 0
 219	 * to enable SSE on Palomino/Morgan/Barton CPU's.
 220	 * If the BIOS didn't enable it already, enable it here.
 221	 */
 222	if (c->x86_model >= 6 && c->x86_model <= 10) {
 223		if (!cpu_has(c, X86_FEATURE_XMM)) {
 224			pr_info("Enabling disabled K7/SSE Support.\n");
 225			msr_clear_bit(MSR_K7_HWCR, 15);
 226			set_cpu_cap(c, X86_FEATURE_XMM);
 227		}
 228	}
 229
 230	/*
 231	 * It's been determined by AMD that Athlons since model 8 stepping 1
 232	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
 233	 * As per AMD technical note 27212 0.2
 234	 */
 235	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
 236		rdmsr(MSR_K7_CLK_CTL, l, h);
 237		if ((l & 0xfff00000) != 0x20000000) {
 238			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
 239				l, ((l & 0x000fffff)|0x20000000));
 240			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
 241		}
 242	}
 243
  244	/* Is this being called from identify_secondary_cpu()? */
 245	if (!c->cpu_index)
 246		return;
 247
 248	/*
 249	 * Certain Athlons might work (for various values of 'work') in SMP
 250	 * but they are not certified as MP capable.
 251	 */
 252	/* Athlon 660/661 is valid. */
 253	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
 254	    (c->x86_stepping == 1)))
 255		return;
 256
 257	/* Duron 670 is valid */
 258	if ((c->x86_model == 7) && (c->x86_stepping == 0))
 259		return;
 260
 261	/*
  262	 * Athlon 662, Duron 671, and Athlon >model 7 have the capability
  263	 * bit. It's worth noting that the A5 stepping (662) of some
  264	 * Athlon XPs have the MP bit set.
 265	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
 266	 * more.
 267	 */
 268	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
 269	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
 270	     (c->x86_model > 7))
 271		if (cpu_has(c, X86_FEATURE_MP))
 272			return;
 273
 274	/* If we get here, not a certified SMP capable AMD system. */
 275
 276	/*
 277	 * Don't taint if we are running SMP kernel on a single non-MP
 278	 * approved Athlon
 279	 */
 280	WARN_ONCE(1, "WARNING: This combination of AMD"
 281		" processors is not suitable for SMP.\n");
 282	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
 283#endif
 284}
 285
 286#ifdef CONFIG_NUMA
 287/*
 288 * To workaround broken NUMA config.  Read the comment in
 289 * srat_detect_node().
 290 */
 291static int nearby_node(int apicid)
 292{
 293	int i, node;
 294
 295	for (i = apicid - 1; i >= 0; i--) {
 296		node = __apicid_to_node[i];
 297		if (node != NUMA_NO_NODE && node_online(node))
 298			return node;
 299	}
 300	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
 301		node = __apicid_to_node[i];
 302		if (node != NUMA_NO_NODE && node_online(node))
 303			return node;
 304	}
 305	return first_node(node_online_map); /* Shouldn't happen */
 306}
 307#endif
 308
 309/*
 310 * Fix up cpu_core_id for pre-F17h systems to be in the
 311 * [0 .. cores_per_node - 1] range. Not really needed but
 312 * kept so as not to break existing setups.
 313 */
 314static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
 315{
 316	u32 cus_per_node;
 317
 318	if (c->x86 >= 0x17)
 319		return;
 320
 321	cus_per_node = c->x86_max_cores / nodes_per_socket;
 322	c->cpu_core_id %= cus_per_node;
 323}
 324
 325/*
 326 * Fixup core topology information for
 327 * (1) AMD multi-node processors
 328 *     Assumption: Number of cores in each internal node is the same.
 329 * (2) AMD processors supporting compute units
 330 */
 331static void amd_get_topology(struct cpuinfo_x86 *c)
 332{
 333	u8 node_id;
 334	int cpu = smp_processor_id();
 335
 336	/* get information required for multi-node processors */
 337	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 338		int err;
 339		u32 eax, ebx, ecx, edx;
 340
 341		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
 342
 343		node_id  = ecx & 0xff;
 344
 345		if (c->x86 == 0x15)
 346			c->cu_id = ebx & 0xff;
 347
 348		if (c->x86 >= 0x17) {
 349			c->cpu_core_id = ebx & 0xff;
 350
 351			if (smp_num_siblings > 1)
 352				c->x86_max_cores /= smp_num_siblings;
 353		}
 354
 355		/*
 356		 * In case leaf B is available, use it to derive
 357		 * topology information.
 358		 */
 359		err = detect_extended_topology(c);
 360		if (!err)
 361			c->x86_coreid_bits = get_count_order(c->x86_max_cores);
 362
 363		cacheinfo_amd_init_llc_id(c, cpu, node_id);
 364
 365	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
 366		u64 value;
 367
 368		rdmsrl(MSR_FAM10H_NODE_ID, value);
 369		node_id = value & 7;
 370
 371		per_cpu(cpu_llc_id, cpu) = node_id;
 372	} else
 373		return;
 374
 375	if (nodes_per_socket > 1) {
 376		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
 377		legacy_fixup_core_id(c);
 378	}
 379}
 380
 381/*
  382 * On an AMD dual-core setup the lower bits of the APIC ID distinguish the cores.
 383 * Assumes number of cores is a power of two.
 384 */
 385static void amd_detect_cmp(struct cpuinfo_x86 *c)
 386{
 387	unsigned bits;
 388	int cpu = smp_processor_id();
 389
 390	bits = c->x86_coreid_bits;
 391	/* Low order bits define the core id (index of core in socket) */
 392	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
 393	/* Convert the initial APIC ID into the socket ID */
 394	c->phys_proc_id = c->initial_apicid >> bits;
 395	/* use socket ID also for last level cache */
 396	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
 397}
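/*
 * Annotation (added for this listing): example of the decomposition
 * above.  With x86_coreid_bits == 3 and initial_apicid == 0x1a,
 * cpu_core_id = 0x1a & 0x7 = 2 and phys_proc_id = 0x1a >> 3 = 3,
 * i.e. core 2 in socket 3, with the socket number doubling as the
 * last-level-cache id.
 */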
 398
 399static void amd_detect_ppin(struct cpuinfo_x86 *c)
 400{
 401	unsigned long long val;
 402
 403	if (!cpu_has(c, X86_FEATURE_AMD_PPIN))
 404		return;
 405
 406	/* When PPIN is defined in CPUID, still need to check PPIN_CTL MSR */
 407	if (rdmsrl_safe(MSR_AMD_PPIN_CTL, &val))
 408		goto clear_ppin;
 409
 410	/* PPIN is locked in disabled mode, clear feature bit */
 411	if ((val & 3UL) == 1UL)
 412		goto clear_ppin;
 413
 414	/* If PPIN is disabled, try to enable it */
 415	if (!(val & 2UL)) {
 416		wrmsrl_safe(MSR_AMD_PPIN_CTL,  val | 2UL);
 417		rdmsrl_safe(MSR_AMD_PPIN_CTL, &val);
 418	}
 419
 420	/* If PPIN_EN bit is 1, return from here; otherwise fall through */
 421	if (val & 2UL)
 422		return;
 423
 424clear_ppin:
 425	clear_cpu_cap(c, X86_FEATURE_AMD_PPIN);
 426}
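/*
 * Annotation (added for this listing): in MSR_AMD_PPIN_CTL, bit 0 is
 * the lock bit and bit 1 the enable bit, so (val & 3) == 1 means
 * "locked with PPIN disabled" -- a state that cannot be changed until
 * reset, hence the feature bit is cleared rather than retrying.
 */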
 427
 428u16 amd_get_nb_id(int cpu)
 429{
 430	return per_cpu(cpu_llc_id, cpu);
 431}
 432EXPORT_SYMBOL_GPL(amd_get_nb_id);
 433
 434u32 amd_get_nodes_per_socket(void)
 435{
 436	return nodes_per_socket;
 437}
 438EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
 439
 440static void srat_detect_node(struct cpuinfo_x86 *c)
 441{
 442#ifdef CONFIG_NUMA
 443	int cpu = smp_processor_id();
 444	int node;
 445	unsigned apicid = c->apicid;
 446
 447	node = numa_cpu_node(cpu);
 448	if (node == NUMA_NO_NODE)
 449		node = per_cpu(cpu_llc_id, cpu);
 450
 451	/*
 452	 * On multi-fabric platform (e.g. Numascale NumaChip) a
 453	 * platform-specific handler needs to be called to fixup some
 454	 * IDs of the CPU.
 455	 */
 456	if (x86_cpuinit.fixup_cpu_id)
 457		x86_cpuinit.fixup_cpu_id(c, node);
 458
 459	if (!node_online(node)) {
 460		/*
 461		 * Two possibilities here:
 462		 *
 463		 * - The CPU is missing memory and no node was created.  In
 464		 *   that case try picking one from a nearby CPU.
 465		 *
 466		 * - The APIC IDs differ from the HyperTransport node IDs
 467		 *   which the K8 northbridge parsing fills in.  Assume
 468		 *   they are all increased by a constant offset, but in
 469		 *   the same order as the HT nodeids.  If that doesn't
 470		 *   result in a usable node fall back to the path for the
 471		 *   previous case.
 472		 *
 473		 * This workaround operates directly on the mapping between
 474		 * APIC ID and NUMA node, assuming certain relationship
 475		 * between APIC ID, HT node ID and NUMA topology.  As going
 476		 * through CPU mapping may alter the outcome, directly
 477		 * access __apicid_to_node[].
 478		 */
 479		int ht_nodeid = c->initial_apicid;
 480
 481		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
 482			node = __apicid_to_node[ht_nodeid];
 483		/* Pick a nearby node */
 484		if (!node_online(node))
 485			node = nearby_node(apicid);
 486	}
 487	numa_set_node(cpu, node);
 488#endif
 489}
 490
 491static void early_init_amd_mc(struct cpuinfo_x86 *c)
 492{
 493#ifdef CONFIG_SMP
 494	unsigned bits, ecx;
 495
 496	/* Multi core CPU? */
 497	if (c->extended_cpuid_level < 0x80000008)
 498		return;
 499
 500	ecx = cpuid_ecx(0x80000008);
 501
 502	c->x86_max_cores = (ecx & 0xff) + 1;
 503
 504	/* CPU telling us the core id bits shift? */
 505	bits = (ecx >> 12) & 0xF;
 506
 507	/* Otherwise recompute */
 508	if (bits == 0) {
 509		while ((1 << bits) < c->x86_max_cores)
 510			bits++;
 511	}
 512
 513	c->x86_coreid_bits = bits;
 514#endif
 515}
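/*
 * Annotation (added for this listing): CPUID 0x80000008 ECX[7:0] is
 * NC (core count minus one) and ECX[15:12] is ApicIdCoreIdSize.  A
 * 6-core part reports NC = 5, and if ApicIdCoreIdSize reads 0 (older
 * parts), the loop above recomputes the width as the smallest n with
 * 2^n >= 6, i.e. 3.
 */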
 516
 517static void bsp_init_amd(struct cpuinfo_x86 *c)
 518{
 519
 520#ifdef CONFIG_X86_64
 521	if (c->x86 >= 0xf) {
 522		unsigned long long tseg;
  523
 524		/*
 525		 * Split up direct mapping around the TSEG SMM area.
 526		 * Don't do it for gbpages because there seems very little
 527		 * benefit in doing so.
 528		 */
 529		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
 530			unsigned long pfn = tseg >> PAGE_SHIFT;
 531
 532			pr_debug("tseg: %010llx\n", tseg);
 533			if (pfn_range_is_mapped(pfn, pfn + 1))
 534				set_memory_4k((unsigned long)__va(tseg), 1);
 535		}
 536	}
 537#endif
  538
 539	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 540
 541		if (c->x86 > 0x10 ||
 542		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
 543			u64 val;
 544
 545			rdmsrl(MSR_K7_HWCR, val);
 546			if (!(val & BIT(24)))
 547				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
 548		}
 549	}
 550
 551	if (c->x86 == 0x15) {
 552		unsigned long upperbit;
 553		u32 cpuid, assoc;
 554
 555		cpuid	 = cpuid_edx(0x80000005);
 556		assoc	 = cpuid >> 16 & 0xff;
 557		upperbit = ((cpuid >> 24) << 10) / assoc;
 558
 559		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
 560		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
 561
 562		/* A random value per boot for bit slice [12:upper_bit) */
 563		va_align.bits = get_random_int() & va_align.mask;
 564	}
 565
 566	if (cpu_has(c, X86_FEATURE_MWAITX))
 567		use_mwaitx_delay();
 568
 569	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 570		u32 ecx;
 571
 572		ecx = cpuid_ecx(0x8000001e);
 573		nodes_per_socket = ((ecx >> 8) & 7) + 1;
 574	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
 575		u64 value;
 576
 577		rdmsrl(MSR_FAM10H_NODE_ID, value);
 578		nodes_per_socket = ((value >> 3) & 7) + 1;
 579	}
 580
 581	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
 582	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
 583	    c->x86 >= 0x15 && c->x86 <= 0x17) {
 584		unsigned int bit;
 585
 586		switch (c->x86) {
 587		case 0x15: bit = 54; break;
 588		case 0x16: bit = 33; break;
 589		case 0x17: bit = 10; break;
 590		default: return;
 591		}
 592		/*
 593		 * Try to cache the base value so further operations can
 594		 * avoid RMW. If that faults, do not enable SSBD.
 595		 */
 596		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
 597			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
 598			setup_force_cpu_cap(X86_FEATURE_SSBD);
 599			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
 600		}
 601	}
 602
 603	resctrl_cpu_detect(c);
 604}
 605
 606static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 607{
 608	u64 msr;
 609
 610	/*
 611	 * BIOS support is required for SME and SEV.
 612	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
 613	 *	      the SME physical address space reduction value.
 614	 *	      If BIOS has not enabled SME then don't advertise the
 615	 *	      SME feature (set in scattered.c).
 616	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
 617	 *            SEV feature (set in scattered.c).
 618	 *
 619	 *   In all cases, since support for SME and SEV requires long mode,
 620	 *   don't advertise the feature under CONFIG_X86_32.
 621	 */
 622	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
 623		/* Check if memory encryption is enabled */
 624		rdmsrl(MSR_K8_SYSCFG, msr);
 625		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
 626			goto clear_all;
 627
 628		/*
 629		 * Always adjust physical address bits. Even though this
 630		 * will be a value above 32-bits this is still done for
 631		 * CONFIG_X86_32 so that accurate values are reported.
 632		 */
 633		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
 634
 635		if (IS_ENABLED(CONFIG_X86_32))
 636			goto clear_all;
 637
 638		rdmsrl(MSR_K7_HWCR, msr);
 639		if (!(msr & MSR_K7_HWCR_SMMLOCK))
 640			goto clear_sev;
 641
 642		return;
 643
 644clear_all:
 645		setup_clear_cpu_cap(X86_FEATURE_SME);
 646clear_sev:
 647		setup_clear_cpu_cap(X86_FEATURE_SEV);
 648	}
 649}
 650
 651static void early_init_amd(struct cpuinfo_x86 *c)
 652{
 653	u64 value;
 654	u32 dummy;
 655
 656	early_init_amd_mc(c);
 657
 658#ifdef CONFIG_X86_32
 659	if (c->x86 == 6)
 660		set_cpu_cap(c, X86_FEATURE_K7);
 661#endif
 662
 663	if (c->x86 >= 0xf)
 664		set_cpu_cap(c, X86_FEATURE_K8);
 665
 666	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 667
 668	/*
 669	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
 670	 * with P/T states and does not stop in deep C-states
 671	 */
 672	if (c->x86_power & (1 << 8)) {
 673		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 674		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 675	}
 676
 677	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
 678	if (c->x86_power & BIT(12))
 679		set_cpu_cap(c, X86_FEATURE_ACC_POWER);
 680
 681#ifdef CONFIG_X86_64
 682	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
 683#else
 684	/*  Set MTRR capability flag if appropriate */
 685	if (c->x86 == 5)
 686		if (c->x86_model == 13 || c->x86_model == 9 ||
 687		    (c->x86_model == 8 && c->x86_stepping >= 8))
 688			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 689#endif
 690#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
 691	/*
 692	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
 693	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
 694	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
 695	 * after 16h.
 696	 */
 697	if (boot_cpu_has(X86_FEATURE_APIC)) {
 698		if (c->x86 > 0x16)
 699			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 700		else if (c->x86 >= 0xf) {
 701			/* check CPU config space for extended APIC ID */
 702			unsigned int val;
 703
 704			val = read_pci_config(0, 24, 0, 0x68);
 705			if ((val >> 17 & 0x3) == 0x3)
 706				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 707		}
 708	}
 709#endif
 710
 711	/*
 712	 * This is only needed to tell the kernel whether to use VMCALL
 713	 * and VMMCALL.  VMMCALL is never executed except under virt, so
 714	 * we can set it unconditionally.
 715	 */
 716	set_cpu_cap(c, X86_FEATURE_VMMCALL);
 717
 718	/* F16h erratum 793, CVE-2013-6885 */
 719	if (c->x86 == 0x16 && c->x86_model <= 0xf)
 720		msr_set_bit(MSR_AMD64_LS_CFG, 15);
 721
 722	/*
 723	 * Check whether the machine is affected by erratum 400. This is
 724	 * used to select the proper idle routine and to enable the check
 725	 * whether the machine is affected in arch_post_acpi_init(), which
 726	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
 727	 */
 728	if (cpu_has_amd_erratum(c, amd_erratum_400))
 729		set_cpu_bug(c, X86_BUG_AMD_E400);
 730
 731	early_detect_mem_encrypt(c);
 732
 733	/* Re-enable TopologyExtensions if switched off by BIOS */
 734	if (c->x86 == 0x15 &&
 735	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
 736	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 737
 738		if (msr_set_bit(0xc0011005, 54) > 0) {
 739			rdmsrl(0xc0011005, value);
 740			if (value & BIT_64(54)) {
 741				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
 742				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
 743			}
 744		}
 745	}
 746
 747	if (cpu_has(c, X86_FEATURE_TOPOEXT))
 748		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
 749}
 750
 751static void init_amd_k8(struct cpuinfo_x86 *c)
 752{
 753	u32 level;
 754	u64 value;
 755
 756	/* On C+ stepping K8 rep microcode works well for copy/memset */
 757	level = cpuid_eax(1);
 758	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 759		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 760
 761	/*
 762	 * Some BIOSes incorrectly force this feature, but only K8 revision D
 763	 * (model = 0x14) and later actually support it.
 764	 * (AMD Erratum #110, docId: 25759).
 765	 */
 766	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
 767		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
 768		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
 769			value &= ~BIT_64(32);
 770			wrmsrl_amd_safe(0xc001100d, value);
 771		}
 772	}
 773
 774	if (!c->x86_model_id[0])
 775		strcpy(c->x86_model_id, "Hammer");
 776
 777#ifdef CONFIG_SMP
 778	/*
 779	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 780	 * bit 6 of msr C001_0015
 781	 *
 782	 * Errata 63 for SH-B3 steppings
 783	 * Errata 122 for all steppings (F+ have it disabled by default)
 784	 */
 785	msr_set_bit(MSR_K7_HWCR, 6);
 786#endif
 787	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
 788}
 789
 790static void init_amd_gh(struct cpuinfo_x86 *c)
 791{
 792#ifdef CONFIG_MMCONF_FAM10H
 793	/* do this for boot cpu */
 794	if (c == &boot_cpu_data)
 795		check_enable_amd_mmconf_dmi();
 796
 797	fam10h_check_enable_mmcfg();
 798#endif
 799
 800	/*
 801	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
 802	 * is always needed when GART is enabled, even in a kernel which has no
 803	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
 804	 * If it doesn't, we do it here as suggested by the BKDG.
 805	 *
 806	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
 807	 */
 808	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
 809
 810	/*
 811	 * On family 10h BIOS may not have properly enabled WC+ support, causing
 812	 * it to be converted to CD memtype. This may result in performance
 813	 * degradation for certain nested-paging guests. Prevent this conversion
 814	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
 815	 *
 816	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
 817	 * guests on older kvm hosts.
 818	 */
 819	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
 820
 821	if (cpu_has_amd_erratum(c, amd_erratum_383))
 822		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 823}
 824
 825#define MSR_AMD64_DE_CFG	0xC0011029
 826
 827static void init_amd_ln(struct cpuinfo_x86 *c)
 828{
 829	/*
 830	 * Apply erratum 665 fix unconditionally so machines without a BIOS
 831	 * fix work.
 832	 */
 833	msr_set_bit(MSR_AMD64_DE_CFG, 31);
 834}
 835
 836static bool rdrand_force;
 837
 838static int __init rdrand_cmdline(char *str)
 839{
 840	if (!str)
 841		return -EINVAL;
 842
 843	if (!strcmp(str, "force"))
 844		rdrand_force = true;
 845	else
 846		return -EINVAL;
 847
 848	return 0;
 849}
 850early_param("rdrand", rdrand_cmdline);
 851
 852static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
 853{
 854	/*
 855	 * Saving of the MSR used to hide the RDRAND support during
 856	 * suspend/resume is done by arch/x86/power/cpu.c, which is
 857	 * dependent on CONFIG_PM_SLEEP.
 858	 */
 859	if (!IS_ENABLED(CONFIG_PM_SLEEP))
 860		return;
 861
 862	/*
 863	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
 864	 * RDRAND support using the CPUID function directly.
 865	 */
 866	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
 867		return;
 868
 869	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
 870
 871	/*
 872	 * Verify that the CPUID change has occurred in case the kernel is
 873	 * running virtualized and the hypervisor doesn't support the MSR.
 874	 */
 875	if (cpuid_ecx(1) & BIT(30)) {
 876		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
 877		return;
 878	}
 879
 880	clear_cpu_cap(c, X86_FEATURE_RDRAND);
 881	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
 882}
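/*
 * Annotation (added for this listing): MSR_AMD64_CPUID_FN_1 overrides
 * the EDX:ECX output of CPUID function 1 -- by this reading, clearing
 * bit 62 (32 + 30) clears ECX[30], the RDRAND feature bit, which is
 * why the code re-reads cpuid_ecx(1) to confirm the override took.
 */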
 883
 884static void init_amd_jg(struct cpuinfo_x86 *c)
 885{
 886	/*
 887	 * Some BIOS implementations do not restore proper RDRAND support
 888	 * across suspend and resume. Check on whether to hide the RDRAND
 889	 * instruction support via CPUID.
 890	 */
 891	clear_rdrand_cpuid_bit(c);
 892}
 893
 894static void init_amd_bd(struct cpuinfo_x86 *c)
 895{
 896	u64 value;
 897
 898	/*
 899	 * The way access filter has a performance penalty on some workloads.
 900	 * Disable it on the affected CPUs.
 901	 */
 902	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
 903		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
 904			value |= 0x1E;
 905			wrmsrl_safe(MSR_F15H_IC_CFG, value);
 906		}
 907	}
 908
 909	/*
 910	 * Some BIOS implementations do not restore proper RDRAND support
 911	 * across suspend and resume. Check on whether to hide the RDRAND
 912	 * instruction support via CPUID.
 913	 */
 914	clear_rdrand_cpuid_bit(c);
 915}
 916
 917static void init_amd_zn(struct cpuinfo_x86 *c)
 918{
 919	set_cpu_cap(c, X86_FEATURE_ZEN);
 920
 921#ifdef CONFIG_NUMA
 922	node_reclaim_distance = 32;
 923#endif
 924
 925	/*
 926	 * Fix erratum 1076: CPB feature bit not being set in CPUID.
 927	 * Always set it, except when running under a hypervisor.
 928	 */
 929	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
 930		set_cpu_cap(c, X86_FEATURE_CPB);
 931}
 932
 933static void init_amd(struct cpuinfo_x86 *c)
 934{
 935	early_init_amd(c);
 936
 937	/*
  938	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
  939	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway
 940	 */
 941	clear_cpu_cap(c, 0*32+31);
 942
 943	if (c->x86 >= 0x10)
 944		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 945
 946	/* get apicid instead of initial apic id from cpuid */
 947	c->apicid = hard_smp_processor_id();
 948
  949	/* K6s report MCEs but don't actually have all the MSRs */
 950	if (c->x86 < 6)
 951		clear_cpu_cap(c, X86_FEATURE_MCE);
 952
 953	switch (c->x86) {
 954	case 4:    init_amd_k5(c); break;
 955	case 5:    init_amd_k6(c); break;
 956	case 6:	   init_amd_k7(c); break;
 957	case 0xf:  init_amd_k8(c); break;
 958	case 0x10: init_amd_gh(c); break;
 959	case 0x12: init_amd_ln(c); break;
 960	case 0x15: init_amd_bd(c); break;
 961	case 0x16: init_amd_jg(c); break;
 962	case 0x17: fallthrough;
 963	case 0x19: init_amd_zn(c); break;
 964	}
 965
 966	/*
 967	 * Enable workaround for FXSAVE leak on CPUs
  968	 * without the XSaveErPtr feature
 969	 */
 970	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
 971		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 972
 973	cpu_detect_cache_sizes(c);
 974
 975	amd_detect_cmp(c);
 976	amd_get_topology(c);
 977	srat_detect_node(c);
 978	amd_detect_ppin(c);
 979
 980	init_amd_cacheinfo(c);
 981
 982	if (cpu_has(c, X86_FEATURE_XMM2)) {
 983		/*
 984		 * Use LFENCE for execution serialization.  On families which
 985		 * don't have that MSR, LFENCE is already serializing.
 986		 * msr_set_bit() uses the safe accessors, too, even if the MSR
 987		 * is not present.
 988		 */
 989		msr_set_bit(MSR_F10H_DECFG,
 990			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
 991
 992		/* A serializing LFENCE stops RDTSC speculation */
 993		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 994	}
 995
 996	/*
  997	 * Family 0x12 and above processors have the APIC timer
  998	 * running in deep C-states.
 999	 */
1000	if (c->x86 > 0x11)
1001		set_cpu_cap(c, X86_FEATURE_ARAT);
1002
1003	/* 3DNow or LM implies PREFETCHW */
1004	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
1005		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
1006			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
1007
1008	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
1009	if (!cpu_has(c, X86_FEATURE_XENPV))
1010		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
1011
1012	/*
1013	 * Turn on the Instructions Retired free counter on machines not
1014	 * susceptible to erratum #1054 "Instructions Retired Performance
1015	 * Counter May Be Inaccurate".
1016	 */
1017	if (cpu_has(c, X86_FEATURE_IRPERF) &&
1018	    !cpu_has_amd_erratum(c, amd_erratum_1054))
1019		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
1020}
1021
1022#ifdef CONFIG_X86_32
1023static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
1024{
1025	/* AMD errata T13 (order #21922) */
1026	if (c->x86 == 6) {
1027		/* Duron Rev A0 */
1028		if (c->x86_model == 3 && c->x86_stepping == 0)
1029			size = 64;
1030		/* Tbird rev A1/A2 */
1031		if (c->x86_model == 4 &&
1032			(c->x86_stepping == 0 || c->x86_stepping == 1))
1033			size = 256;
1034	}
1035	return size;
1036}
1037#endif
1038
1039static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
1040{
1041	u32 ebx, eax, ecx, edx;
1042	u16 mask = 0xfff;
1043
1044	if (c->x86 < 0xf)
1045		return;
1046
1047	if (c->extended_cpuid_level < 0x80000006)
1048		return;
1049
1050	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
1051
1052	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
1053	tlb_lli_4k[ENTRIES] = ebx & mask;
1054
1055	/*
1056	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
1057	 * characteristics from the CPUID function 0x80000005 instead.
1058	 */
1059	if (c->x86 == 0xf) {
1060		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1061		mask = 0xff;
1062	}
1063
1064	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1065	if (!((eax >> 16) & mask))
1066		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
1067	else
1068		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
1069
1070	/* a 4M entry uses two 2M entries */
1071	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
1072
1073	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1074	if (!(eax & mask)) {
1075		/* Erratum 658 */
1076		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
1077			tlb_lli_2m[ENTRIES] = 1024;
1078		} else {
1079			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1080			tlb_lli_2m[ENTRIES] = eax & 0xff;
1081		}
1082	} else
1083		tlb_lli_2m[ENTRIES] = eax & mask;
1084
1085	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
1086}
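/*
 * Annotation (added for this listing): in CPUID 0x80000006, EBX
 * describes the L2 4K TLBs (ITLB entry count in bits 11:0, DTLB entry
 * count in bits 27:16) and EAX uses the same split for 2M/4M entries,
 * hence the shared 12-bit mask.  K8 (family 0xf) predates these
 * fields, so the code narrows the mask to 8 bits and falls back to
 * the L1 counts reported by leaf 0x80000005.
 */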
1087
1088static const struct cpu_dev amd_cpu_dev = {
1089	.c_vendor	= "AMD",
1090	.c_ident	= { "AuthenticAMD" },
1091#ifdef CONFIG_X86_32
1092	.legacy_models = {
1093		{ .family = 4, .model_names =
1094		  {
1095			  [3] = "486 DX/2",
1096			  [7] = "486 DX/2-WB",
1097			  [8] = "486 DX/4",
1098			  [9] = "486 DX/4-WB",
1099			  [14] = "Am5x86-WT",
1100			  [15] = "Am5x86-WB"
1101		  }
1102		},
1103	},
1104	.legacy_cache_size = amd_size_cache,
1105#endif
1106	.c_early_init   = early_init_amd,
1107	.c_detect_tlb	= cpu_detect_tlb_amd,
1108	.c_bsp_init	= bsp_init_amd,
1109	.c_init		= init_amd,
1110	.c_x86_vendor	= X86_VENDOR_AMD,
1111};
1112
1113cpu_dev_register(amd_cpu_dev);
1114
1115/*
1116 * AMD errata checking
1117 *
1118 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
1119 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
1120 * have an OSVW id assigned, which it takes as first argument. Both take a
1121 * variable number of family-specific model-stepping ranges created by
1122 * AMD_MODEL_RANGE().
1123 *
1124 * Example:
1125 *
1126 * const int amd_erratum_319[] =
1127 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
1128 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
1129 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
1130 */
1131
1132#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
1133#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
1134#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
1135	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
1136#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
1137#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
1138#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
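/*
 * Annotation (added for this listing): worked example of the packing.
 * AMD_MODEL_RANGE() puts the family in bits 31:24 and each endpoint
 * in 12 bits as (model << 4) | stepping, so
 * AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf) evaluates to 0x170002ff --
 * family 0x17, model 0x0 stepping 0x0 through model 0x2f stepping 0xf.
 */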
1139
1140static const int amd_erratum_400[] =
1141	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
1142			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
1143
1144static const int amd_erratum_383[] =
1145	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
1146
1147/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
1148static const int amd_erratum_1054[] =
1149	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
1150
1151static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
1152{
1153	int osvw_id = *erratum++;
1154	u32 range;
1155	u32 ms;
1156
1157	if (osvw_id >= 0 && osvw_id < 65536 &&
1158	    cpu_has(cpu, X86_FEATURE_OSVW)) {
1159		u64 osvw_len;
1160
1161		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
1162		if (osvw_id < osvw_len) {
1163			u64 osvw_bits;
1164
1165			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
1166			    osvw_bits);
1167			return osvw_bits & (1ULL << (osvw_id & 0x3f));
1168		}
1169	}
1170
1171	/* OSVW unavailable or ID unknown, match family-model-stepping range */
1172	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
1173	while ((range = *erratum++))
1174		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
1175		    (ms >= AMD_MODEL_RANGE_START(range)) &&
1176		    (ms <= AMD_MODEL_RANGE_END(range)))
1177			return true;
1178
1179	return false;
1180}
1181
1182void set_dr_addr_mask(unsigned long mask, int dr)
1183{
1184	if (!boot_cpu_has(X86_FEATURE_BPEXT))
1185		return;
1186
1187	switch (dr) {
1188	case 0:
1189		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
1190		break;
1191	case 1:
1192	case 2:
1193	case 3:
1194		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
1195		break;
1196	default:
1197		break;
1198	}
1199}
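/*
 * Annotation (added for this listing): the DR1-DR3 address-mask MSRs
 * are contiguous, so for dr in {1, 2, 3} the target is
 * MSR_F16H_DR1_ADDR_MASK + (dr - 1); DR0's mask MSR lives at a
 * separate address, hence its own case above.
 */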
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/export.h>
   3#include <linux/bitops.h>
   4#include <linux/elf.h>
   5#include <linux/mm.h>
   6
   7#include <linux/io.h>
   8#include <linux/sched.h>
   9#include <linux/sched/clock.h>
  10#include <linux/random.h>
  11#include <linux/topology.h>
  12#include <asm/processor.h>
  13#include <asm/apic.h>
  14#include <asm/cacheinfo.h>
  15#include <asm/cpu.h>
  16#include <asm/spec-ctrl.h>
  17#include <asm/smp.h>
  18#include <asm/numa.h>
  19#include <asm/pci-direct.h>
  20#include <asm/delay.h>
  21#include <asm/debugreg.h>
  22#include <asm/resctrl.h>
  23#include <asm/sev.h>
  24
  25#ifdef CONFIG_X86_64
  26# include <asm/mmconfig.h>
  27#endif
  28
  29#include "cpu.h"
  30
  31static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
  32{
  33	u32 gprs[8] = { 0 };
  34	int err;
  35
  36	WARN_ONCE((boot_cpu_data.x86 != 0xf),
  37		  "%s should only be used on K8!\n", __func__);
  38
  39	gprs[1] = msr;
  40	gprs[7] = 0x9c5a203a;
  41
  42	err = rdmsr_safe_regs(gprs);
  43
  44	*p = gprs[0] | ((u64)gprs[2] << 32);
  45
  46	return err;
  47}
  48
  49static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
  50{
  51	u32 gprs[8] = { 0 };
  52
  53	WARN_ONCE((boot_cpu_data.x86 != 0xf),
  54		  "%s should only be used on K8!\n", __func__);
  55
  56	gprs[0] = (u32)val;
  57	gprs[1] = msr;
  58	gprs[2] = val >> 32;
  59	gprs[7] = 0x9c5a203a;
  60
  61	return wrmsr_safe_regs(gprs);
  62}
  63
  64/*
   65 *	B-step AMD K6 CPUs before B 9730xxxx have hardware bugs that can cause
  66 *	misexecution of code under Linux. Owners of such processors should
  67 *	contact AMD for precise details and a CPU swap.
  68 *
  69 *	See	http://www.multimania.com/poulot/k6bug.html
  70 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
  71 *		(Publication # 21266  Issue Date: August 1998)
  72 *
  73 *	The following test is erm.. interesting. AMD neglected to up
  74 *	the chip setting when fixing the bug but they also tweaked some
  75 *	performance at the same time..
  76 */
  77
  78#ifdef CONFIG_X86_32
  79extern __visible void vide(void);
  80__asm__(".text\n"
  81	".globl vide\n"
  82	".type vide, @function\n"
  83	".align 4\n"
  84	"vide: ret\n");
  85#endif
  86
  87static void init_amd_k5(struct cpuinfo_x86 *c)
  88{
  89#ifdef CONFIG_X86_32
  90/*
  91 * General Systems BIOSen alias the cpu frequency registers
  92 * of the Elan at 0x000df000. Unfortunately, one of the Linux
  93 * drivers subsequently pokes it, and changes the CPU speed.
  94 * Workaround : Remove the unneeded alias.
  95 */
  96#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
  97#define CBAR_ENB	(0x80000000)
  98#define CBAR_KEY	(0X000000CB)
  99	if (c->x86_model == 9 || c->x86_model == 10) {
 100		if (inl(CBAR) & CBAR_ENB)
 101			outl(0 | CBAR_KEY, CBAR);
 102	}
 103#endif
 104}
 105
 106static void init_amd_k6(struct cpuinfo_x86 *c)
 107{
 108#ifdef CONFIG_X86_32
 109	u32 l, h;
 110	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
 111
 112	if (c->x86_model < 6) {
 113		/* Based on AMD doc 20734R - June 2000 */
 114		if (c->x86_model == 0) {
 115			clear_cpu_cap(c, X86_FEATURE_APIC);
 116			set_cpu_cap(c, X86_FEATURE_PGE);
 117		}
 118		return;
 119	}
 120
 121	if (c->x86_model == 6 && c->x86_stepping == 1) {
 122		const int K6_BUG_LOOP = 1000000;
 123		int n;
 124		void (*f_vide)(void);
 125		u64 d, d2;
 126
 127		pr_info("AMD K6 stepping B detected - ");
 128
 129		/*
 130		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
 131		 * calls at the same time.
 132		 */
 133
 134		n = K6_BUG_LOOP;
 135		f_vide = vide;
 136		OPTIMIZER_HIDE_VAR(f_vide);
 137		d = rdtsc();
 138		while (n--)
 139			f_vide();
 140		d2 = rdtsc();
 141		d = d2-d;
 142
 143		if (d > 20*K6_BUG_LOOP)
 144			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
 145		else
 146			pr_cont("probably OK (after B9730xxxx).\n");
 147	}
 148
 149	/* K6 with old style WHCR */
 150	if (c->x86_model < 8 ||
 151	   (c->x86_model == 8 && c->x86_stepping < 8)) {
 152		/* We can only write allocate on the low 508Mb */
 153		if (mbytes > 508)
 154			mbytes = 508;
 155
 156		rdmsr(MSR_K6_WHCR, l, h);
 157		if ((l&0x0000FFFF) == 0) {
 158			unsigned long flags;
 159			l = (1<<0)|((mbytes/4)<<1);
 160			local_irq_save(flags);
 161			wbinvd();
 162			wrmsr(MSR_K6_WHCR, l, h);
 163			local_irq_restore(flags);
 164			pr_info("Enabling old style K6 write allocation for %d Mb\n",
 165				mbytes);
 166		}
 167		return;
 168	}
 169
 170	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
 171	     c->x86_model == 9 || c->x86_model == 13) {
 172		/* The more serious chips .. */
 173
 174		if (mbytes > 4092)
 175			mbytes = 4092;
 176
 177		rdmsr(MSR_K6_WHCR, l, h);
 178		if ((l&0xFFFF0000) == 0) {
 179			unsigned long flags;
 180			l = ((mbytes>>2)<<22)|(1<<16);
 181			local_irq_save(flags);
 182			wbinvd();
 183			wrmsr(MSR_K6_WHCR, l, h);
 184			local_irq_restore(flags);
 185			pr_info("Enabling new style K6 write allocation for %d Mb\n",
 186				mbytes);
 187		}
 188
 189		return;
 190	}
 191
 192	if (c->x86_model == 10) {
 193		/* AMD Geode LX is model 10 */
 194		/* placeholder for any needed mods */
 195		return;
 196	}
 197#endif
 198}
 199
 200static void init_amd_k7(struct cpuinfo_x86 *c)
 201{
 202#ifdef CONFIG_X86_32
 203	u32 l, h;
 204
 205	/*
 206	 * Bit 15 of Athlon specific MSR 15, needs to be 0
 207	 * to enable SSE on Palomino/Morgan/Barton CPU's.
 208	 * If the BIOS didn't enable it already, enable it here.
 209	 */
 210	if (c->x86_model >= 6 && c->x86_model <= 10) {
 211		if (!cpu_has(c, X86_FEATURE_XMM)) {
 212			pr_info("Enabling disabled K7/SSE Support.\n");
 213			msr_clear_bit(MSR_K7_HWCR, 15);
 214			set_cpu_cap(c, X86_FEATURE_XMM);
 215		}
 216	}
 217
 218	/*
 219	 * It's been determined by AMD that Athlons since model 8 stepping 1
 220	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
 221	 * As per AMD technical note 27212 0.2
 222	 */
 223	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
 224		rdmsr(MSR_K7_CLK_CTL, l, h);
 225		if ((l & 0xfff00000) != 0x20000000) {
 226			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
 227				l, ((l & 0x000fffff)|0x20000000));
 228			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
 229		}
 230	}
 231
  232	/* Is this being called from identify_secondary_cpu()? */
 233	if (!c->cpu_index)
 234		return;
 235
 236	/*
 237	 * Certain Athlons might work (for various values of 'work') in SMP
 238	 * but they are not certified as MP capable.
 239	 */
 240	/* Athlon 660/661 is valid. */
 241	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
 242	    (c->x86_stepping == 1)))
 243		return;
 244
 245	/* Duron 670 is valid */
 246	if ((c->x86_model == 7) && (c->x86_stepping == 0))
 247		return;
 248
 249	/*
  250	 * Athlon 662, Duron 671, and Athlon >model 7 have the capability
  251	 * bit. It's worth noting that the A5 stepping (662) of some
  252	 * Athlon XPs have the MP bit set.
 253	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
 254	 * more.
 255	 */
 256	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
 257	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
 258	     (c->x86_model > 7))
 259		if (cpu_has(c, X86_FEATURE_MP))
 260			return;
 261
 262	/* If we get here, not a certified SMP capable AMD system. */
 263
 264	/*
 265	 * Don't taint if we are running SMP kernel on a single non-MP
 266	 * approved Athlon
 267	 */
 268	WARN_ONCE(1, "WARNING: This combination of AMD"
 269		" processors is not suitable for SMP.\n");
 270	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
 271#endif
 272}
 273
 274#ifdef CONFIG_NUMA
 275/*
 276 * To workaround broken NUMA config.  Read the comment in
 277 * srat_detect_node().
 278 */
 279static int nearby_node(int apicid)
 280{
 281	int i, node;
 282
 283	for (i = apicid - 1; i >= 0; i--) {
 284		node = __apicid_to_node[i];
 285		if (node != NUMA_NO_NODE && node_online(node))
 286			return node;
 287	}
 288	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
 289		node = __apicid_to_node[i];
 290		if (node != NUMA_NO_NODE && node_online(node))
 291			return node;
 292	}
 293	return first_node(node_online_map); /* Shouldn't happen */
 294}
 295#endif
 296
 297static void srat_detect_node(struct cpuinfo_x86 *c)
 298{
 299#ifdef CONFIG_NUMA
 300	int cpu = smp_processor_id();
 301	int node;
 302	unsigned apicid = c->topo.apicid;
 303
 304	node = numa_cpu_node(cpu);
 305	if (node == NUMA_NO_NODE)
 306		node = per_cpu_llc_id(cpu);
 307
 308	/*
 309	 * On multi-fabric platform (e.g. Numascale NumaChip) a
 310	 * platform-specific handler needs to be called to fixup some
 311	 * IDs of the CPU.
 312	 */
 313	if (x86_cpuinit.fixup_cpu_id)
 314		x86_cpuinit.fixup_cpu_id(c, node);
 315
 316	if (!node_online(node)) {
 317		/*
 318		 * Two possibilities here:
 319		 *
 320		 * - The CPU is missing memory and no node was created.  In
 321		 *   that case try picking one from a nearby CPU.
 322		 *
 323		 * - The APIC IDs differ from the HyperTransport node IDs
 324		 *   which the K8 northbridge parsing fills in.  Assume
 325		 *   they are all increased by a constant offset, but in
 326		 *   the same order as the HT nodeids.  If that doesn't
 327		 *   result in a usable node fall back to the path for the
 328		 *   previous case.
 329		 *
 330		 * This workaround operates directly on the mapping between
 331		 * APIC ID and NUMA node, assuming certain relationship
 332		 * between APIC ID, HT node ID and NUMA topology.  As going
 333		 * through CPU mapping may alter the outcome, directly
 334		 * access __apicid_to_node[].
 335		 */
 336		int ht_nodeid = c->topo.initial_apicid;
 337
 338		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
 339			node = __apicid_to_node[ht_nodeid];
 340		/* Pick a nearby node */
 341		if (!node_online(node))
 342			node = nearby_node(apicid);
 343	}
 344	numa_set_node(cpu, node);
 345#endif
 346}
 347
 348static void bsp_determine_snp(struct cpuinfo_x86 *c)
 349{
 350#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
 351	cc_vendor = CC_VENDOR_AMD;
 352
 353	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
 354		/*
 355		 * RMP table entry format is not architectural and is defined by the
 356		 * per-processor PPR. Restrict SNP support on the known CPU models
 357		 * for which the RMP table entry format is currently defined for.
 358		 */
 359		if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
 360		    c->x86 >= 0x19 && snp_probe_rmptable_info()) {
 361			cc_platform_set(CC_ATTR_HOST_SEV_SNP);
 362		} else {
 363			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
 364			cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 365		}
 366	}
 367#endif
 368}
 369
 370static void bsp_init_amd(struct cpuinfo_x86 *c)
 371{
 372	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 373
 374		if (c->x86 > 0x10 ||
 375		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
 376			u64 val;
 377
 378			rdmsrl(MSR_K7_HWCR, val);
 379			if (!(val & BIT(24)))
 380				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
 381		}
 382	}
 383
 384	if (c->x86 == 0x15) {
 385		unsigned long upperbit;
 386		u32 cpuid, assoc;
 387
 388		cpuid	 = cpuid_edx(0x80000005);
 389		assoc	 = cpuid >> 16 & 0xff;
 390		upperbit = ((cpuid >> 24) << 10) / assoc;
 391
 392		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
 393		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
 394
 395		/* A random value per boot for bit slice [12:upper_bit) */
 396		va_align.bits = get_random_u32() & va_align.mask;
 397	}
 398
 399	if (cpu_has(c, X86_FEATURE_MWAITX))
 400		use_mwaitx_delay();
 401
 402	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
 403	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
 404	    c->x86 >= 0x15 && c->x86 <= 0x17) {
 405		unsigned int bit;
 406
 407		switch (c->x86) {
 408		case 0x15: bit = 54; break;
 409		case 0x16: bit = 33; break;
 410		case 0x17: bit = 10; break;
 411		default: return;
 412		}
 413		/*
 414		 * Try to cache the base value so further operations can
 415		 * avoid RMW. If that faults, do not enable SSBD.
 416		 */
 417		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
 418			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
 419			setup_force_cpu_cap(X86_FEATURE_SSBD);
 420			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
 421		}
 422	}
 423
 424	resctrl_cpu_detect(c);
 425
 426	/* Figure out Zen generations: */
 427	switch (c->x86) {
 428	case 0x17:
 429		switch (c->x86_model) {
 430		case 0x00 ... 0x2f:
 431		case 0x50 ... 0x5f:
 432			setup_force_cpu_cap(X86_FEATURE_ZEN1);
 433			break;
 434		case 0x30 ... 0x4f:
 435		case 0x60 ... 0x7f:
 436		case 0x90 ... 0x91:
 437		case 0xa0 ... 0xaf:
 438			setup_force_cpu_cap(X86_FEATURE_ZEN2);
 439			break;
 440		default:
 441			goto warn;
 442		}
 443		break;
 444
 445	case 0x19:
 446		switch (c->x86_model) {
 447		case 0x00 ... 0x0f:
 448		case 0x20 ... 0x5f:
 449			setup_force_cpu_cap(X86_FEATURE_ZEN3);
 450			break;
 451		case 0x10 ... 0x1f:
 452		case 0x60 ... 0xaf:
 453			setup_force_cpu_cap(X86_FEATURE_ZEN4);
 454			break;
 455		default:
 456			goto warn;
 457		}
 458		break;
 459
 460	case 0x1a:
 461		switch (c->x86_model) {
 462		case 0x00 ... 0x2f:
 463		case 0x40 ... 0x4f:
 464		case 0x70 ... 0x7f:
 465			setup_force_cpu_cap(X86_FEATURE_ZEN5);
 466			break;
 467		default:
 468			goto warn;
 469		}
 470		break;
 471
 472	default:
 473		break;
 474	}
 475
 476	bsp_determine_snp(c);
 477	return;
 478
 479warn:
 480	WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
 481}
 482
 483static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 484{
 485	u64 msr;
 486
 487	/*
 488	 * BIOS support is required for SME and SEV.
 489	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
 490	 *	      the SME physical address space reduction value.
 491	 *	      If BIOS has not enabled SME then don't advertise the
 492	 *	      SME feature (set in scattered.c).
 493	 *	      If the kernel has not enabled SME via any means then
 494	 *	      don't advertise the SME feature.
 495	 *   For SEV: If BIOS has not enabled SEV then don't advertise SEV and
 496	 *	      any additional functionality based on it.
 497	 *
 498	 *   In all cases, since support for SME and SEV requires long mode,
 499	 *   don't advertise the feature under CONFIG_X86_32.
 500	 */
 501	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
 502		/* Check if memory encryption is enabled */
 503		rdmsrl(MSR_AMD64_SYSCFG, msr);
 504		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
 505			goto clear_all;
 506
 507		/*
 508		 * Always adjust physical address bits. Even though this
 509		 * will be a value above 32-bits this is still done for
 510		 * CONFIG_X86_32 so that accurate values are reported.
 511		 */
 512		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
 513
 514		if (IS_ENABLED(CONFIG_X86_32))
 515			goto clear_all;
 516
 517		if (!sme_me_mask)
 518			setup_clear_cpu_cap(X86_FEATURE_SME);
 519
 520		rdmsrl(MSR_K7_HWCR, msr);
 521		if (!(msr & MSR_K7_HWCR_SMMLOCK))
 522			goto clear_sev;
 523
 524		return;
 525
 526clear_all:
 527		setup_clear_cpu_cap(X86_FEATURE_SME);
 528clear_sev:
 529		setup_clear_cpu_cap(X86_FEATURE_SEV);
 530		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
 531		setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
 532	}
 533}
 534
 535static void early_init_amd(struct cpuinfo_x86 *c)
 536{
 537	u32 dummy;
 538
 539	if (c->x86 >= 0xf)
 540		set_cpu_cap(c, X86_FEATURE_K8);
 541
 542	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 543
 544	/*
 545	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
 546	 * with P/T states and does not stop in deep C-states
 547	 */
 548	if (c->x86_power & (1 << 8)) {
 549		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 550		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 551	}
 552
 553	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
 554	if (c->x86_power & BIT(12))
 555		set_cpu_cap(c, X86_FEATURE_ACC_POWER);
 556
 557	/* Bit 14 indicates the Runtime Average Power Limit interface. */
 558	if (c->x86_power & BIT(14))
 559		set_cpu_cap(c, X86_FEATURE_RAPL);
 560
 561#ifdef CONFIG_X86_64
 562	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
 563#else
 564	/*  Set MTRR capability flag if appropriate */
 565	if (c->x86 == 5)
 566		if (c->x86_model == 13 || c->x86_model == 9 ||
 567		    (c->x86_model == 8 && c->x86_stepping >= 8))
 568			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 569#endif
 570#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
 571	/*
 572	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
 573	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
 574	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
 575	 * after 16h.
 576	 */
 577	if (boot_cpu_has(X86_FEATURE_APIC)) {
 578		if (c->x86 > 0x16)
 579			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 580		else if (c->x86 >= 0xf) {
 581			/* check CPU config space for extended APIC ID */
 582			unsigned int val;
 583
 584			val = read_pci_config(0, 24, 0, 0x68);
 585			if ((val >> 17 & 0x3) == 0x3)
 586				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 587		}
 588	}
 589#endif
 590
 591	/*
 592	 * This is only needed to tell the kernel whether to use VMCALL
 593	 * and VMMCALL.  VMMCALL is never executed except under virt, so
 594	 * we can set it unconditionally.
 595	 */
 596	set_cpu_cap(c, X86_FEATURE_VMMCALL);
 597
 598	/* F16h erratum 793, CVE-2013-6885 */
 599	if (c->x86 == 0x16 && c->x86_model <= 0xf)
 600		msr_set_bit(MSR_AMD64_LS_CFG, 15);
 601
 602	early_detect_mem_encrypt(c);
 603
 604	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
 605		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
 606			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
 607		else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
 608			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
 609			setup_force_cpu_cap(X86_FEATURE_SBPB);
 610		}
 611	}
 612}
 613
 614static void init_amd_k8(struct cpuinfo_x86 *c)
 615{
 616	u32 level;
 617	u64 value;
 618
 619	/* On C+ stepping K8 rep microcode works well for copy/memset */
 620	level = cpuid_eax(1);
 621	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 622		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 623
 624	/*
 625	 * Some BIOSes incorrectly force this feature, but only K8 revision D
 626	 * (model = 0x14) and later actually support it.
 627	 * (AMD Erratum #110, docId: 25759).
 628	 */
 629	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
 630		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
 631		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
 632			value &= ~BIT_64(32);
 633			wrmsrl_amd_safe(0xc001100d, value);
 634		}
 635	}
 636
 637	if (!c->x86_model_id[0])
 638		strcpy(c->x86_model_id, "Hammer");
 639
 640#ifdef CONFIG_SMP
 641	/*
 642	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 643	 * bit 6 of msr C001_0015
 644	 *
 645	 * Errata 63 for SH-B3 steppings
 646	 * Errata 122 for all steppings (F+ have it disabled by default)
 647	 */
 648	msr_set_bit(MSR_K7_HWCR, 6);
 649#endif
 650	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
 651
 652	/*
 653	 * Check models and steppings affected by erratum 400. This is
 654	 * used to select the proper idle routine and to enable the
 655	 * check whether the machine is affected in arch_post_acpi_subsys_init()
 656	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
 657	 */
 658	if (c->x86_model > 0x41 ||
 659	    (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
 660		setup_force_cpu_bug(X86_BUG_AMD_E400);
 661}
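/*
 * Annotation (added for this listing): this open-coded model/stepping
 * test replaces the amd_erratum_400 OSVW table used in the v5.9 copy
 * above, with the same family 0xf cutoff: model 0x41 stepping 0x2 and
 * later.
 */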
 662
 663static void init_amd_gh(struct cpuinfo_x86 *c)
 664{
 665#ifdef CONFIG_MMCONF_FAM10H
 666	/* do this for boot cpu */
 667	if (c == &boot_cpu_data)
 668		check_enable_amd_mmconf_dmi();
 669
 670	fam10h_check_enable_mmcfg();
 671#endif
 672
 673	/*
 674	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
 675	 * is always needed when GART is enabled, even in a kernel which has no
 676	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
 677	 * If it doesn't, we do it here as suggested by the BKDG.
 678	 *
 679	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
 680	 */
 681	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
 682
 683	/*
 684	 * On family 10h BIOS may not have properly enabled WC+ support, causing
 685	 * it to be converted to CD memtype. This may result in performance
 686	 * degradation for certain nested-paging guests. Prevent this conversion
 687	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
 688	 *
 689	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
 690	 * guests on older kvm hosts.
 691	 */
 692	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
 693
 694	set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 695
 696	/*
 697	 * Check models and steppings affected by erratum 400. This is
 698	 * used to select the proper idle routine and to enable the
 699	 * check whether the machine is affected in arch_post_acpi_subsys_init()
 700	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
 701	 */
 702	if (c->x86_model > 0x2 ||
 703	    (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
 704		setup_force_cpu_bug(X86_BUG_AMD_E400);
 705}
 706
 707static void init_amd_ln(struct cpuinfo_x86 *c)
 708{
 709	/*
 710	 * Apply erratum 665 fix unconditionally so machines without a BIOS
 711	 * fix work.
 712	 */
 713	msr_set_bit(MSR_AMD64_DE_CFG, 31);
 714}
 715
 716static bool rdrand_force;
 717
 718static int __init rdrand_cmdline(char *str)
 719{
 720	if (!str)
 721		return -EINVAL;
 722
 723	if (!strcmp(str, "force"))
 724		rdrand_force = true;
 725	else
 726		return -EINVAL;
 727
 728	return 0;
 729}
 730early_param("rdrand", rdrand_cmdline);
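
/*
 * Usage note: booting with "rdrand=force" on the kernel command line
 * keeps RDRAND advertised in CPUID on the affected parts, for users who
 * trust their firmware to restore RDRAND state correctly across
 * suspend/resume.
 */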
 731
 732static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
 733{
 734	/*
 735	 * Saving of the MSR used to hide the RDRAND support during
 736	 * suspend/resume is done by arch/x86/power/cpu.c, which is
 737	 * dependent on CONFIG_PM_SLEEP.
 738	 */
 739	if (!IS_ENABLED(CONFIG_PM_SLEEP))
 740		return;
 741
 742	/*
 743	 * The self-test can clear X86_FEATURE_RDRAND, so check for
 744	 * RDRAND support using the CPUID function directly.
 745	 */
 746	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
 747		return;
 748
 749	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
 750
 751	/*
 752	 * Verify that the CPUID change has occurred in case the kernel is
 753	 * running virtualized and the hypervisor doesn't support the MSR.
 754	 */
 755	if (cpuid_ecx(1) & BIT(30)) {
 756		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
 757		return;
 758	}
 759
 760	clear_cpu_cap(c, X86_FEATURE_RDRAND);
 761	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
 762}
 763
 764static void init_amd_jg(struct cpuinfo_x86 *c)
 765{
 766	/*
 767	 * Some BIOS implementations do not restore proper RDRAND support
 768	 * across suspend and resume. Check whether to hide the RDRAND
 769	 * instruction support via CPUID.
 770	 */
 771	clear_rdrand_cpuid_bit(c);
 772}
 773
 774static void init_amd_bd(struct cpuinfo_x86 *c)
 775{
 776	u64 value;
 777
 778	/*
 779	 * The Way Access Filter has a performance penalty on some workloads.
 780	 * Disable it on the affected CPUs.
 781	 */
 782	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
 783		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
 784			value |= 0x1E;
 785			wrmsrl_safe(MSR_F15H_IC_CFG, value);
 786		}
 787	}
 788
 789	/*
 790	 * Some BIOS implementations do not restore proper RDRAND support
 791	 * across suspend and resume. Check whether to hide the RDRAND
 792	 * instruction support via CPUID.
 793	 */
 794	clear_rdrand_cpuid_bit(c);
 795}
 796
 797static void fix_erratum_1386(struct cpuinfo_x86 *c)
 798{
 799	/*
 800	 * Work around Erratum 1386.  The XSAVES instruction malfunctions in
 801	 * certain circumstances on Zen1/2 uarch, and not all parts have had
 802	 * updated microcode at the time of writing (March 2023).
 803	 *
 804	 * Affected parts all have no supervisor XSAVE states, meaning that
 805	 * the XSAVEC instruction (which works fine) is equivalent.
 806	 */
 807	clear_cpu_cap(c, X86_FEATURE_XSAVES);
 808}
 809
 810void init_spectral_chicken(struct cpuinfo_x86 *c)
 811{
 812#ifdef CONFIG_MITIGATION_UNRET_ENTRY
 813	u64 value;
 814
 815	/*
 816	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
 817	 *
 818	 * This suppresses speculation from the middle of a basic block, i.e. it
 819	 * suppresses non-branch predictions.
 820	 */
 821	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
 822		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
 823			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
 824			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
 825		}
 826	}
 827#endif
 828}
 829
 830static void init_amd_zen_common(void)
 831{
 832	setup_force_cpu_cap(X86_FEATURE_ZEN);
 833#ifdef CONFIG_NUMA
 834	node_reclaim_distance = 32;
 835#endif
 836}
 837
 838static void init_amd_zen1(struct cpuinfo_x86 *c)
 839{
 840	fix_erratum_1386(c);
 841
 842	/* Fix up CPUID bits, but only if not virtualised. */
 843	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
 844
 845		/* Erratum 1076: CPB feature bit not being set in CPUID. */
 846		if (!cpu_has(c, X86_FEATURE_CPB))
 847			set_cpu_cap(c, X86_FEATURE_CPB);
 848	}
 849
 850	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
 851	setup_force_cpu_bug(X86_BUG_DIV0);
 852}
 853
 854static bool cpu_has_zenbleed_microcode(void)
 855{
 856	u32 good_rev = 0;
 857
 858	switch (boot_cpu_data.x86_model) {
 859	case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
 860	case 0x60 ... 0x67: good_rev = 0x0860010c; break;
 861	case 0x68 ... 0x6f: good_rev = 0x08608107; break;
 862	case 0x70 ... 0x7f: good_rev = 0x08701033; break;
 863	case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;
 864
 865	default:
 866		return false;
 867	}
 868
 869	if (boot_cpu_data.microcode < good_rev)
 870		return false;
 871
 872	return true;
 873}
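
/*
 * Illustrative example (hypothetical revision value): a model 0x31 part
 * reporting a microcode revision below 0x0830107b compares as stale
 * here, so zen2_zenbleed_check() below falls back to the chicken bit.
 */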
 874
 875static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
 876{
 877	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
 878		return;
 879
 880	if (!cpu_has(c, X86_FEATURE_AVX))
 881		return;
 882
 883	if (!cpu_has_zenbleed_microcode()) {
 884		pr_notice_once("Zenbleed: please update your microcode for the optimal fix\n");
 885		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
 886	} else {
 887		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
 888	}
 889}
 890
 891static void init_amd_zen2(struct cpuinfo_x86 *c)
 892{
 893	init_spectral_chicken(c);
 894	fix_erratum_1386(c);
 895	zen2_zenbleed_check(c);
 896}
 897
 898static void init_amd_zen3(struct cpuinfo_x86 *c)
 899{
 900	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
 901		/*
 902		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
 903		 * Branch Type Confusion, but predate the allocation of the
 904		 * BTC_NO bit.
 905		 */
 906		if (!cpu_has(c, X86_FEATURE_BTC_NO))
 907			set_cpu_cap(c, X86_FEATURE_BTC_NO);
 908	}
 909}
 910
 911static void init_amd_zen4(struct cpuinfo_x86 *c)
 912{
 913	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
 914		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
 915}
 916
 917static void init_amd_zen5(struct cpuinfo_x86 *c)
 918{
 919}
 920
 921static void init_amd(struct cpuinfo_x86 *c)
 922{
 923	u64 vm_cr;
 924
 925	early_init_amd(c);
 926
 927	/*
 928	 * Bit 31 in normal CPUID is used for the nonstandard 3DNow ID;
 929	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway.
 930	 */
 931	clear_cpu_cap(c, 0*32+31);
 932
 933	if (c->x86 >= 0x10)
 934		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 935
 936	/* AMD FSRM also implies FSRS */
 937	if (cpu_has(c, X86_FEATURE_FSRM))
 938		set_cpu_cap(c, X86_FEATURE_FSRS);
 939
 940	/* K6s report MCEs but don't actually have all the MSRs */
 941	if (c->x86 < 6)
 942		clear_cpu_cap(c, X86_FEATURE_MCE);
 943
 944	switch (c->x86) {
 945	case 4:    init_amd_k5(c); break;
 946	case 5:    init_amd_k6(c); break;
 947	case 6:	   init_amd_k7(c); break;
 948	case 0xf:  init_amd_k8(c); break;
 949	case 0x10: init_amd_gh(c); break;
 950	case 0x12: init_amd_ln(c); break;
 951	case 0x15: init_amd_bd(c); break;
 952	case 0x16: init_amd_jg(c); break;
 953	}
 954
 955	/*
 956	 * Save up on some future enablement work and do common Zen
 957	 * settings.
 958	 */
 959	if (c->x86 >= 0x17)
 960		init_amd_zen_common();
 961
 962	if (boot_cpu_has(X86_FEATURE_ZEN1))
 963		init_amd_zen1(c);
 964	else if (boot_cpu_has(X86_FEATURE_ZEN2))
 965		init_amd_zen2(c);
 966	else if (boot_cpu_has(X86_FEATURE_ZEN3))
 967		init_amd_zen3(c);
 968	else if (boot_cpu_has(X86_FEATURE_ZEN4))
 969		init_amd_zen4(c);
 970	else if (boot_cpu_has(X86_FEATURE_ZEN5))
 971		init_amd_zen5(c);
 972
 973	/*
 974	 * Enable workaround for FXSAVE leak on CPUs
 975	 * without a XSaveErPtr feature
 976	 */
 977	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
 978		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 979
 980	cpu_detect_cache_sizes(c);
 981
 982	srat_detect_node(c);
 983
 984	init_amd_cacheinfo(c);
 985
 986	if (cpu_has(c, X86_FEATURE_SVM)) {
 987		rdmsrl(MSR_VM_CR, vm_cr);
 988		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
 989			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
 990			clear_cpu_cap(c, X86_FEATURE_SVM);
 991		}
 992	}
 993
 994	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
 995		/*
 996		 * Use LFENCE for execution serialization.  On families which
 997		 * don't have that MSR, LFENCE is already serializing.
 998		 * msr_set_bit() uses the safe accessors, too, even if the MSR
 999		 * is not present.
1000		 */
1001		msr_set_bit(MSR_AMD64_DE_CFG,
1002			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
1003
1004		/* A serializing LFENCE stops RDTSC speculation */
1005		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
1006	}
1007
1008	/*
 1009	 * Family 0x12 and above processors have the APIC timer
 1010	 * running even in deep C-states.
1011	 */
1012	if (c->x86 > 0x11)
1013		set_cpu_cap(c, X86_FEATURE_ARAT);
1014
1015	/* 3DNow or LM implies PREFETCHW */
1016	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
1017		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
1018			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
1019
1020	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
1021	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
1022		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
1023
1024	/*
1025	 * Turn on the Instructions Retired free counter on machines not
1026	 * susceptible to erratum #1054 "Instructions Retired Performance
1027	 * Counter May Be Inaccurate".
1028	 */
1029	if (cpu_has(c, X86_FEATURE_IRPERF) &&
 1030	    !(boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model <= 0x2f))
1031		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
1032
1033	check_null_seg_clears_base(c);
1034
1035	/*
1036	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
1037	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
1038	 * order to be replicated onto them. Regardless, set it here again, if not set,
1039	 * to protect against any future refactoring/code reorganization which might
1040	 * miss setting this important bit.
1041	 */
1042	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1043	    cpu_has(c, X86_FEATURE_AUTOIBRS))
1044		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
1045
1046	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
1047	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
1048}
1049
1050#ifdef CONFIG_X86_32
1051static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
1052{
1053	/* AMD errata T13 (order #21922) */
1054	if (c->x86 == 6) {
1055		/* Duron Rev A0 */
1056		if (c->x86_model == 3 && c->x86_stepping == 0)
1057			size = 64;
1058		/* Tbird rev A1/A2 */
1059		if (c->x86_model == 4 &&
1060			(c->x86_stepping == 0 || c->x86_stepping == 1))
1061			size = 256;
1062	}
1063	return size;
1064}
1065#endif
1066
1067static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
1068{
1069	u32 ebx, eax, ecx, edx;
1070	u16 mask = 0xfff;
1071
1072	if (c->x86 < 0xf)
1073		return;
1074
1075	if (c->extended_cpuid_level < 0x80000006)
1076		return;
1077
1078	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
1079
1080	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
1081	tlb_lli_4k[ENTRIES] = ebx & mask;
1082
1083	/*
1084	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
1085	 * characteristics from the CPUID function 0x80000005 instead.
1086	 */
1087	if (c->x86 == 0xf) {
1088		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1089		mask = 0xff;
1090	}
1091
1092	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1093	if (!((eax >> 16) & mask))
1094		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
1095	else
1096		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
1097
1098	/* a 4M entry uses two 2M entries */
1099	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
1100
1101	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1102	if (!(eax & mask)) {
1103		/* Erratum 658 */
1104		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
1105			tlb_lli_2m[ENTRIES] = 1024;
1106		} else {
1107			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1108			tlb_lli_2m[ENTRIES] = eax & 0xff;
1109		}
1110	} else
1111		tlb_lli_2m[ENTRIES] = eax & mask;
1112
1113	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
1114}
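
/*
 * A minimal decode sketch of the CPUID leaf 0x80000006 EBX layout
 * relied on above (L2 4K TLB: data entries in bits 27:16, instruction
 * entries in bits 11:0):
 *
 *	u32 ebx = cpuid_ebx(0x80000006);
 *	unsigned int l2_dtlb_4k = (ebx >> 16) & 0xfff;
 *	unsigned int l2_itlb_4k = ebx & 0xfff;
 */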
1115
1116static const struct cpu_dev amd_cpu_dev = {
1117	.c_vendor	= "AMD",
1118	.c_ident	= { "AuthenticAMD" },
1119#ifdef CONFIG_X86_32
1120	.legacy_models = {
1121		{ .family = 4, .model_names =
1122		  {
1123			  [3] = "486 DX/2",
1124			  [7] = "486 DX/2-WB",
1125			  [8] = "486 DX/4",
1126			  [9] = "486 DX/4-WB",
1127			  [14] = "Am5x86-WT",
1128			  [15] = "Am5x86-WB"
1129		  }
1130		},
1131	},
1132	.legacy_cache_size = amd_size_cache,
1133#endif
1134	.c_early_init   = early_init_amd,
1135	.c_detect_tlb	= cpu_detect_tlb_amd,
1136	.c_bsp_init	= bsp_init_amd,
1137	.c_init		= init_amd,
1138	.c_x86_vendor	= X86_VENDOR_AMD,
1139};
1140
1141cpu_dev_register(amd_cpu_dev);
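
/*
 * cpu_dev_register() places a pointer to amd_cpu_dev into the
 * .x86_cpu_dev.init section; the common CPU setup code walks that
 * section at boot and matches vendors via the c_ident strings.
 */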
1142
1143static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);
1144
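/*
 * DR0's address mask lives in its own MSR; the DR1-DR3 masks occupy
 * consecutive MSRs starting at MSR_F16H_DR1_ADDR_MASK, hence the +1/+2
 * arithmetic below.
 */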
1145static unsigned int amd_msr_dr_addr_masks[] = {
1146	MSR_F16H_DR0_ADDR_MASK,
1147	MSR_F16H_DR1_ADDR_MASK,
1148	MSR_F16H_DR1_ADDR_MASK + 1,
1149	MSR_F16H_DR1_ADDR_MASK + 2
1150};
1151
1152void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
1153{
1154	int cpu = smp_processor_id();
1155
1156	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
1157		return;
1158
1159	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
1160		return;
1161
1162	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
1163		return;
1164
1165	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
1166	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
1167}
1168
1169unsigned long amd_get_dr_addr_mask(unsigned int dr)
1170{
1171	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
1172		return 0;
1173
1174	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
1175		return 0;
1176
1177	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
1178}
1179EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);
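
/*
 * Hedged usage sketch (hypothetical caller, e.g. hw_breakpoint code):
 *
 *	amd_set_dr_addr_mask(0xfff, 0);		// DR0 ignores the low 12 bits
 *	mask = amd_get_dr_addr_mask(0);		// reads back 0xfff from the cache
 *	amd_set_dr_addr_mask(0, 0);		// restore exact address matching
 *
 * The per-CPU cache avoids redundant WRMSRs when the mask is unchanged.
 */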
1180
1181u32 amd_get_highest_perf(void)
1182{
1183	struct cpuinfo_x86 *c = &boot_cpu_data;
1184
1185	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
1186			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
1187		return 166;
1188
1189	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
1190			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
1191		return 166;
1192
1193	return 255;
1194}
1195EXPORT_SYMBOL_GPL(amd_get_highest_perf);
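
/*
 * 255 is the usual CPPC highest-performance value; certain Zen2 (Rome)
 * and Zen3 (Milan) model ranges report 166 instead, and returning the
 * matching value here keeps frequency-invariance calculations accurate
 * on those parts.
 */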
1196
1197static void zenbleed_check_cpu(void *unused)
1198{
1199	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
1200
1201	zen2_zenbleed_check(c);
1202}
1203
1204void amd_check_microcode(void)
1205{
1206	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
1207		return;
1208
1209	on_each_cpu(zenbleed_check_cpu, NULL, 1);
1210}
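
/*
 * Intended to run after microcode (re)loading so that every CPU
 * re-evaluates its Zenbleed state: once a fixed revision is in place,
 * the chicken bit is cleared again and the performance penalty goes
 * away.
 */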
1211
1212/*
1213 * Issue a DIV 0/1 insn to clear any division data from previous DIV
1214 * operations.
1215 */
1216void noinstr amd_clear_divider(void)
1217{
1218	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
1219		     :: "a" (0), "d" (0), "r" (1));
1220}
1221EXPORT_SYMBOL_GPL(amd_clear_divider);
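
/*
 * The ALTERNATIVE patches the DIV in only on parts with X86_BUG_DIV0,
 * so calling this unconditionally on sensitive paths (e.g. before
 * switching to a different protection domain) is effectively free on
 * unaffected CPUs.
 */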