v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/export.h>
   3#include <linux/bitops.h>
   4#include <linux/elf.h>
   5#include <linux/mm.h>
   6
   7#include <linux/io.h>
   8#include <linux/sched.h>
   9#include <linux/sched/clock.h>
  10#include <linux/random.h>
  11#include <linux/topology.h>
  12#include <asm/processor.h>
  13#include <asm/apic.h>
  14#include <asm/cacheinfo.h>
  15#include <asm/cpu.h>
  16#include <asm/spec-ctrl.h>
  17#include <asm/smp.h>
  18#include <asm/pci-direct.h>
  19#include <asm/delay.h>
  20#include <asm/debugreg.h>
  21
  22#ifdef CONFIG_X86_64
  23# include <asm/mmconfig.h>
  24# include <asm/set_memory.h>
  25#endif
  26
  27#include "cpu.h"
  28
  29static const int amd_erratum_383[];
  30static const int amd_erratum_400[];
  31static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
  32
  33/*
  34 * nodes_per_socket: Stores the number of nodes per socket.
  35 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
  36 * Node Identifiers[10:8]
  37 */
  38static u32 nodes_per_socket = 1;
  39
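/*
 * Note on the calling convention below (inferred from rdmsr_safe_regs(),
 * not from an AMD document): the gprs[8] array maps to {eax, ecx, edx,
 * ebx, esp, ebp, esi, edi}. gprs[1] is therefore the MSR number in ECX,
 * the 64-bit value travels in EDX:EAX (gprs[2]:gprs[0]), and gprs[7]
 * loads EDI with 0x9c5a203a, the "password" K8 wants to see before it
 * grants access to certain northbridge MSRs.
 */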
  40static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
  41{
  42	u32 gprs[8] = { 0 };
  43	int err;
  44
  45	WARN_ONCE((boot_cpu_data.x86 != 0xf),
  46		  "%s should only be used on K8!\n", __func__);
  47
  48	gprs[1] = msr;
  49	gprs[7] = 0x9c5a203a;
  50
  51	err = rdmsr_safe_regs(gprs);
  52
  53	*p = gprs[0] | ((u64)gprs[2] << 32);
  54
  55	return err;
  56}
  57
  58static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
  59{
  60	u32 gprs[8] = { 0 };
  61
  62	WARN_ONCE((boot_cpu_data.x86 != 0xf),
  63		  "%s should only be used on K8!\n", __func__);
  64
  65	gprs[0] = (u32)val;
  66	gprs[1] = msr;
  67	gprs[2] = val >> 32;
  68	gprs[7] = 0x9c5a203a;
  69
  70	return wrmsr_safe_regs(gprs);
  71}
  72
  73/*
   74 *	B-step AMD K6 processors before B 9730xxxx have hardware bugs that can cause
  75 *	misexecution of code under Linux. Owners of such processors should
  76 *	contact AMD for precise details and a CPU swap.
  77 *
  78 *	See	http://www.multimania.com/poulot/k6bug.html
  79 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
  80 *		(Publication # 21266  Issue Date: August 1998)
  81 *
  82 *	The following test is erm.. interesting. AMD neglected to up
  83 *	the chip setting when fixing the bug but they also tweaked some
  84 *	performance at the same time..
  85 */
  86
  87#ifdef CONFIG_X86_32
  88extern __visible void vide(void);
  89__asm__(".text\n"
  90	".globl vide\n"
  91	".type vide, @function\n"
  92	".align 4\n"
  93	"vide: ret\n");
  94#endif
  95
  96static void init_amd_k5(struct cpuinfo_x86 *c)
  97{
  98#ifdef CONFIG_X86_32
  99/*
 100 * General Systems BIOSen alias the cpu frequency registers
 101 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 102 * drivers subsequently pokes it, and changes the CPU speed.
 103 * Workaround : Remove the unneeded alias.
 104 */
 105#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
 106#define CBAR_ENB	(0x80000000)
 107#define CBAR_KEY	(0X000000CB)
 108	if (c->x86_model == 9 || c->x86_model == 10) {
 109		if (inl(CBAR) & CBAR_ENB)
 110			outl(0 | CBAR_KEY, CBAR);
 111	}
 112#endif
 113}
 114
 115static void init_amd_k6(struct cpuinfo_x86 *c)
 116{
 117#ifdef CONFIG_X86_32
 118	u32 l, h;
 119	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
 120
 121	if (c->x86_model < 6) {
 122		/* Based on AMD doc 20734R - June 2000 */
 123		if (c->x86_model == 0) {
 124			clear_cpu_cap(c, X86_FEATURE_APIC);
 125			set_cpu_cap(c, X86_FEATURE_PGE);
 126		}
 127		return;
 128	}
 129
 130	if (c->x86_model == 6 && c->x86_stepping == 1) {
 131		const int K6_BUG_LOOP = 1000000;
 132		int n;
 133		void (*f_vide)(void);
 134		u64 d, d2;
 135
 136		pr_info("AMD K6 stepping B detected - ");
 137
 138		/*
 139		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
 140		 * calls at the same time.
 141		 */
 142
 143		n = K6_BUG_LOOP;
 144		f_vide = vide;
 145		OPTIMIZER_HIDE_VAR(f_vide);
 146		d = rdtsc();
 147		while (n--)
 148			f_vide();
 149		d2 = rdtsc();
 150		d = d2-d;
 151
 152		if (d > 20*K6_BUG_LOOP)
 153			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
 154		else
 155			pr_cont("probably OK (after B9730xxxx).\n");
 156	}
 157
 158	/* K6 with old style WHCR */
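	/*
	 * (Layout inferred from the arithmetic below, not quoted from the
	 * K6 documentation: old-style WHCR keeps a write-allocate enable
	 * in bit 0 and the cacheable-memory limit, in 4 MB units, from
	 * bit 1 up; the new-style layout used for later models further
	 * down puts the enable in bit 16 and the limit in bits 22+.)
	 */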
 159	if (c->x86_model < 8 ||
 160	   (c->x86_model == 8 && c->x86_stepping < 8)) {
 161		/* We can only write allocate on the low 508Mb */
 162		if (mbytes > 508)
 163			mbytes = 508;
 164
 165		rdmsr(MSR_K6_WHCR, l, h);
 166		if ((l&0x0000FFFF) == 0) {
 167			unsigned long flags;
 168			l = (1<<0)|((mbytes/4)<<1);
 169			local_irq_save(flags);
 170			wbinvd();
 171			wrmsr(MSR_K6_WHCR, l, h);
 172			local_irq_restore(flags);
 173			pr_info("Enabling old style K6 write allocation for %d Mb\n",
 174				mbytes);
 175		}
 176		return;
 177	}
 178
 179	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
 180	     c->x86_model == 9 || c->x86_model == 13) {
 181		/* The more serious chips .. */
 182
 183		if (mbytes > 4092)
 184			mbytes = 4092;
 185
 186		rdmsr(MSR_K6_WHCR, l, h);
 187		if ((l&0xFFFF0000) == 0) {
 188			unsigned long flags;
 189			l = ((mbytes>>2)<<22)|(1<<16);
 190			local_irq_save(flags);
 191			wbinvd();
 192			wrmsr(MSR_K6_WHCR, l, h);
 193			local_irq_restore(flags);
 194			pr_info("Enabling new style K6 write allocation for %d Mb\n",
 195				mbytes);
 196		}
 197
 198		return;
 199	}
 200
 201	if (c->x86_model == 10) {
 202		/* AMD Geode LX is model 10 */
 203		/* placeholder for any needed mods */
 204		return;
 205	}
 206#endif
 207}
 208
 209static void init_amd_k7(struct cpuinfo_x86 *c)
 210{
 211#ifdef CONFIG_X86_32
 212	u32 l, h;
 213
 214	/*
 215	 * Bit 15 of Athlon specific MSR 15, needs to be 0
 216	 * to enable SSE on Palomino/Morgan/Barton CPU's.
 217	 * If the BIOS didn't enable it already, enable it here.
 218	 */
 219	if (c->x86_model >= 6 && c->x86_model <= 10) {
 220		if (!cpu_has(c, X86_FEATURE_XMM)) {
 221			pr_info("Enabling disabled K7/SSE Support.\n");
 222			msr_clear_bit(MSR_K7_HWCR, 15);
 223			set_cpu_cap(c, X86_FEATURE_XMM);
 224		}
 225	}
 226
 227	/*
 228	 * It's been determined by AMD that Athlons since model 8 stepping 1
 229	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
 230	 * As per AMD technical note 27212 0.2
 231	 */
 232	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
 233		rdmsr(MSR_K7_CLK_CTL, l, h);
 234		if ((l & 0xfff00000) != 0x20000000) {
 235			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
 236				l, ((l & 0x000fffff)|0x20000000));
 237			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
 238		}
 239	}
 240
  241	/* Are we being called from identify_secondary_cpu()? */
 242	if (!c->cpu_index)
 243		return;
 244
 245	/*
 246	 * Certain Athlons might work (for various values of 'work') in SMP
 247	 * but they are not certified as MP capable.
 248	 */
 249	/* Athlon 660/661 is valid. */
 250	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
 251	    (c->x86_stepping == 1)))
 252		return;
 253
 254	/* Duron 670 is valid */
 255	if ((c->x86_model == 7) && (c->x86_stepping == 0))
 256		return;
 257
 258	/*
  259	 * Athlon 662, Duron 671, and Athlons above model 7 have the MP
  260	 * capability bit. It's worth noting that the A5 stepping (662)
  261	 * of some Athlon XPs has the MP bit set.
 262	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
 263	 * more.
 264	 */
 265	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
 266	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
 267	     (c->x86_model > 7))
 268		if (cpu_has(c, X86_FEATURE_MP))
 269			return;
 270
 271	/* If we get here, not a certified SMP capable AMD system. */
 272
 273	/*
 274	 * Don't taint if we are running SMP kernel on a single non-MP
 275	 * approved Athlon
 276	 */
 277	WARN_ONCE(1, "WARNING: This combination of AMD"
 278		" processors is not suitable for SMP.\n");
 279	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
 280#endif
 281}
 282
 283#ifdef CONFIG_NUMA
 284/*
  285 * To work around a broken NUMA config.  Read the comment in
 286 * srat_detect_node().
 287 */
 288static int nearby_node(int apicid)
 289{
 290	int i, node;
 291
 292	for (i = apicid - 1; i >= 0; i--) {
 293		node = __apicid_to_node[i];
 294		if (node != NUMA_NO_NODE && node_online(node))
 295			return node;
 296	}
 297	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
 298		node = __apicid_to_node[i];
 299		if (node != NUMA_NO_NODE && node_online(node))
 300			return node;
 301	}
 302	return first_node(node_online_map); /* Shouldn't happen */
 303}
 304#endif
 305
 306/*
 307 * Fix up cpu_core_id for pre-F17h systems to be in the
 308 * [0 .. cores_per_node - 1] range. Not really needed but
 309 * kept so as not to break existing setups.
 310 */
 311static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
 312{
 313	u32 cus_per_node;
 314
 315	if (c->x86 >= 0x17)
 316		return;
 317
 318	cus_per_node = c->x86_max_cores / nodes_per_socket;
 319	c->cpu_core_id %= cus_per_node;
 320}
 321
 322
 323static void amd_get_topology_early(struct cpuinfo_x86 *c)
 324{
 325	if (cpu_has(c, X86_FEATURE_TOPOEXT))
 326		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
 327}
 328
 329/*
 330 * Fixup core topology information for
 331 * (1) AMD multi-node processors
 332 *     Assumption: Number of cores in each internal node is the same.
 333 * (2) AMD processors supporting compute units
 334 */
 335static void amd_get_topology(struct cpuinfo_x86 *c)
 336{
 337	u8 node_id;
 338	int cpu = smp_processor_id();
 339
 340	/* get information required for multi-node processors */
 341	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 342		int err;
 343		u32 eax, ebx, ecx, edx;
 344
 345		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
 346
 347		node_id  = ecx & 0xff;
 348
 349		if (c->x86 == 0x15)
 350			c->cu_id = ebx & 0xff;
 351
 352		if (c->x86 >= 0x17) {
 353			c->cpu_core_id = ebx & 0xff;
 354
 355			if (smp_num_siblings > 1)
 356				c->x86_max_cores /= smp_num_siblings;
 357		}
 358
 359		/*
 360		 * In case leaf B is available, use it to derive
 361		 * topology information.
 362		 */
 363		err = detect_extended_topology(c);
 364		if (!err)
 365			c->x86_coreid_bits = get_count_order(c->x86_max_cores);
 366
 367		cacheinfo_amd_init_llc_id(c, cpu, node_id);
 368
 369	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
 370		u64 value;
 371
 372		rdmsrl(MSR_FAM10H_NODE_ID, value);
 373		node_id = value & 7;
 374
 375		per_cpu(cpu_llc_id, cpu) = node_id;
 376	} else
 377		return;
 378
 379	if (nodes_per_socket > 1) {
 380		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
 381		legacy_fixup_core_id(c);
 382	}
 383}
 384
 385/*
  386 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 387 * Assumes number of cores is a power of two.
 388 */
 389static void amd_detect_cmp(struct cpuinfo_x86 *c)
 390{
 391	unsigned bits;
 392	int cpu = smp_processor_id();
 393
 394	bits = c->x86_coreid_bits;
 395	/* Low order bits define the core id (index of core in socket) */
 396	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
 397	/* Convert the initial APIC ID into the socket ID */
 398	c->phys_proc_id = c->initial_apicid >> bits;
 399	/* use socket ID also for last level cache */
 400	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
 401}
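/*
 * Worked example with made-up values: if x86_coreid_bits == 2 and
 * initial_apicid == 0b1011, the core id is 0b1011 & 0b11 == 3 and the
 * socket id (phys_proc_id) is 0b1011 >> 2 == 2.
 */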
 402
 403u16 amd_get_nb_id(int cpu)
 404{
 405	return per_cpu(cpu_llc_id, cpu);
 406}
 407EXPORT_SYMBOL_GPL(amd_get_nb_id);
 408
 409u32 amd_get_nodes_per_socket(void)
 410{
 411	return nodes_per_socket;
 412}
 413EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
 414
 415static void srat_detect_node(struct cpuinfo_x86 *c)
 416{
 417#ifdef CONFIG_NUMA
 418	int cpu = smp_processor_id();
 419	int node;
 420	unsigned apicid = c->apicid;
 421
 422	node = numa_cpu_node(cpu);
 423	if (node == NUMA_NO_NODE)
 424		node = per_cpu(cpu_llc_id, cpu);
 425
 426	/*
 427	 * On multi-fabric platform (e.g. Numascale NumaChip) a
 428	 * platform-specific handler needs to be called to fixup some
 429	 * IDs of the CPU.
 430	 */
 431	if (x86_cpuinit.fixup_cpu_id)
 432		x86_cpuinit.fixup_cpu_id(c, node);
 433
 434	if (!node_online(node)) {
 435		/*
 436		 * Two possibilities here:
 437		 *
 438		 * - The CPU is missing memory and no node was created.  In
 439		 *   that case try picking one from a nearby CPU.
 440		 *
 441		 * - The APIC IDs differ from the HyperTransport node IDs
 442		 *   which the K8 northbridge parsing fills in.  Assume
 443		 *   they are all increased by a constant offset, but in
 444		 *   the same order as the HT nodeids.  If that doesn't
 445		 *   result in a usable node fall back to the path for the
 446		 *   previous case.
 447		 *
 448		 * This workaround operates directly on the mapping between
 449		 * APIC ID and NUMA node, assuming certain relationship
 450		 * between APIC ID, HT node ID and NUMA topology.  As going
 451		 * through CPU mapping may alter the outcome, directly
 452		 * access __apicid_to_node[].
 453		 */
 454		int ht_nodeid = c->initial_apicid;
 455
 456		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
 457			node = __apicid_to_node[ht_nodeid];
 458		/* Pick a nearby node */
 459		if (!node_online(node))
 460			node = nearby_node(apicid);
 461	}
 462	numa_set_node(cpu, node);
 463#endif
 464}
 465
 466static void early_init_amd_mc(struct cpuinfo_x86 *c)
 467{
 468#ifdef CONFIG_SMP
 469	unsigned bits, ecx;
 470
 471	/* Multi core CPU? */
 472	if (c->extended_cpuid_level < 0x80000008)
 473		return;
 474
 475	ecx = cpuid_ecx(0x80000008);
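	/*
	 * Leaf 0x80000008 ECX, per the AMD APM: bits [7:0] hold NC (the
	 * core count minus one) and bits [15:12] hold ApicIdCoreIdSize,
	 * the width of the core-id field within the APIC ID. A zero
	 * there means "not reported", hence the recomputation below.
	 */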
 476
 477	c->x86_max_cores = (ecx & 0xff) + 1;
 478
 479	/* CPU telling us the core id bits shift? */
 480	bits = (ecx >> 12) & 0xF;
 481
 482	/* Otherwise recompute */
 483	if (bits == 0) {
 484		while ((1 << bits) < c->x86_max_cores)
 485			bits++;
 486	}
 487
 488	c->x86_coreid_bits = bits;
 489#endif
 490}
 491
 492static void bsp_init_amd(struct cpuinfo_x86 *c)
 493{
 494
 495#ifdef CONFIG_X86_64
 496	if (c->x86 >= 0xf) {
 497		unsigned long long tseg;
 498
 499		/*
 500		 * Split up direct mapping around the TSEG SMM area.
 501		 * Don't do it for gbpages because there seems very little
 502		 * benefit in doing so.
 503		 */
 504		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
 505			unsigned long pfn = tseg >> PAGE_SHIFT;
 506
 507			pr_debug("tseg: %010llx\n", tseg);
 508			if (pfn_range_is_mapped(pfn, pfn + 1))
 509				set_memory_4k((unsigned long)__va(tseg), 1);
 510		}
 511	}
 512#endif
 513
 514	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 515
 516		if (c->x86 > 0x10 ||
 517		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
 518			u64 val;
 519
 520			rdmsrl(MSR_K7_HWCR, val);
 521			if (!(val & BIT(24)))
 522				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
 523		}
 524	}
 525
 526	if (c->x86 == 0x15) {
 527		unsigned long upperbit;
 528		u32 cpuid, assoc;
 529
 530		cpuid	 = cpuid_edx(0x80000005);
 531		assoc	 = cpuid >> 16 & 0xff;
 532		upperbit = ((cpuid >> 24) << 10) / assoc;
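		/*
		 * Reading of the math above (not a spec quote): leaf
		 * 0x80000005 EDX describes the L1 instruction cache,
		 * size in KB in bits [31:24] and associativity in bits
		 * [23:16], so upperbit is the size of one cache way in
		 * bytes. Randomizing VA bits [12:log2(upperbit)) keeps
		 * the two cores of a Fam15h compute unit, which share
		 * that L1I, from aliasing each other in it.
		 */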
 533
 534		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
 535		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
 536
 537		/* A random value per boot for bit slice [12:upper_bit) */
 538		va_align.bits = get_random_int() & va_align.mask;
 539	}
 540
 541	if (cpu_has(c, X86_FEATURE_MWAITX))
 542		use_mwaitx_delay();
 543
 544	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 545		u32 ecx;
 546
 547		ecx = cpuid_ecx(0x8000001e);
 548		nodes_per_socket = ((ecx >> 8) & 7) + 1;
 549	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
 550		u64 value;
 551
 552		rdmsrl(MSR_FAM10H_NODE_ID, value);
 553		nodes_per_socket = ((value >> 3) & 7) + 1;
 554	}
 555
 556	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
 557	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
 558	    c->x86 >= 0x15 && c->x86 <= 0x17) {
 559		unsigned int bit;
 560
 561		switch (c->x86) {
 562		case 0x15: bit = 54; break;
 563		case 0x16: bit = 33; break;
 564		case 0x17: bit = 10; break;
 565		default: return;
 566		}
 567		/*
 568		 * Try to cache the base value so further operations can
 569		 * avoid RMW. If that faults, do not enable SSBD.
 570		 */
 571		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
 572			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
 573			setup_force_cpu_cap(X86_FEATURE_SSBD);
 574			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
 575		}
 576	}
 577}
 578
 579static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 580{
 581	u64 msr;
 582
 583	/*
 584	 * BIOS support is required for SME and SEV.
 585	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
 586	 *	      the SME physical address space reduction value.
 587	 *	      If BIOS has not enabled SME then don't advertise the
 588	 *	      SME feature (set in scattered.c).
 589	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
 590	 *            SEV feature (set in scattered.c).
 591	 *
 592	 *   In all cases, since support for SME and SEV requires long mode,
 593	 *   don't advertise the feature under CONFIG_X86_32.
 594	 */
 595	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
 596		/* Check if memory encryption is enabled */
 597		rdmsrl(MSR_K8_SYSCFG, msr);
 598		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
 599			goto clear_all;
 600
 601		/*
 602		 * Always adjust physical address bits. Even though this
 603		 * will be a value above 32-bits this is still done for
 604		 * CONFIG_X86_32 so that accurate values are reported.
 605		 */
 606		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
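		/*
		 * CPUID 0x8000001f EBX[5:0] is the position of the
		 * encryption (C) bit and EBX[11:6] the number of
		 * physical address bits consumed by memory encryption,
		 * which is the quantity subtracted here.
		 */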
 607
 608		if (IS_ENABLED(CONFIG_X86_32))
 609			goto clear_all;
 610
 611		rdmsrl(MSR_K7_HWCR, msr);
 612		if (!(msr & MSR_K7_HWCR_SMMLOCK))
 613			goto clear_sev;
 614
 615		return;
 616
 617clear_all:
 618		clear_cpu_cap(c, X86_FEATURE_SME);
 619clear_sev:
 620		clear_cpu_cap(c, X86_FEATURE_SEV);
 621	}
 622}
 623
 624static void early_init_amd(struct cpuinfo_x86 *c)
 625{
 626	u64 value;
 627	u32 dummy;
 628
 629	early_init_amd_mc(c);
 630
 631#ifdef CONFIG_X86_32
 632	if (c->x86 == 6)
 633		set_cpu_cap(c, X86_FEATURE_K7);
 634#endif
 635
 636	if (c->x86 >= 0xf)
 637		set_cpu_cap(c, X86_FEATURE_K8);
 638
 639	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 640
 641	/*
 642	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
 643	 * with P/T states and does not stop in deep C-states
 644	 */
 645	if (c->x86_power & (1 << 8)) {
 646		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 647		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 648	}
 649
 650	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
 651	if (c->x86_power & BIT(12))
 652		set_cpu_cap(c, X86_FEATURE_ACC_POWER);
 653
 654#ifdef CONFIG_X86_64
 655	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
 656#else
 657	/*  Set MTRR capability flag if appropriate */
 658	if (c->x86 == 5)
 659		if (c->x86_model == 13 || c->x86_model == 9 ||
 660		    (c->x86_model == 8 && c->x86_stepping >= 8))
 661			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 662#endif
 663#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
 664	/*
 665	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
 666	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
 667	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
 668	 * after 16h.
 669	 */
 670	if (boot_cpu_has(X86_FEATURE_APIC)) {
 671		if (c->x86 > 0x16)
 672			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 673		else if (c->x86 >= 0xf) {
 674			/* check CPU config space for extended APIC ID */
 675			unsigned int val;
 676
 677			val = read_pci_config(0, 24, 0, 0x68);
 678			if ((val >> 17 & 0x3) == 0x3)
 679				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 680		}
 681	}
 682#endif
 683
 684	/*
 685	 * This is only needed to tell the kernel whether to use VMCALL
 686	 * and VMMCALL.  VMMCALL is never executed except under virt, so
 687	 * we can set it unconditionally.
 688	 */
 689	set_cpu_cap(c, X86_FEATURE_VMMCALL);
 690
 691	/* F16h erratum 793, CVE-2013-6885 */
 692	if (c->x86 == 0x16 && c->x86_model <= 0xf)
 693		msr_set_bit(MSR_AMD64_LS_CFG, 15);
 694
 695	/*
 696	 * Check whether the machine is affected by erratum 400. This is
 697	 * used to select the proper idle routine and to enable the check
 698	 * whether the machine is affected in arch_post_acpi_init(), which
 699	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
 700	 */
 701	if (cpu_has_amd_erratum(c, amd_erratum_400))
 702		set_cpu_bug(c, X86_BUG_AMD_E400);
 703
 704	early_detect_mem_encrypt(c);
 705
 706	/* Re-enable TopologyExtensions if switched off by BIOS */
 707	if (c->x86 == 0x15 &&
 708	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
 709	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 710
 711		if (msr_set_bit(0xc0011005, 54) > 0) {
 712			rdmsrl(0xc0011005, value);
 713			if (value & BIT_64(54)) {
 714				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
 715				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
 716			}
 717		}
 718	}
 719
 720	amd_get_topology_early(c);
 721}
 722
 723static void init_amd_k8(struct cpuinfo_x86 *c)
 724{
 725	u32 level;
 726	u64 value;
 727
 728	/* On C+ stepping K8 rep microcode works well for copy/memset */
 729	level = cpuid_eax(1);
 730	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 731		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 732
 733	/*
 734	 * Some BIOSes incorrectly force this feature, but only K8 revision D
 735	 * (model = 0x14) and later actually support it.
 736	 * (AMD Erratum #110, docId: 25759).
 737	 */
 738	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
 739		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
 740		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
 741			value &= ~BIT_64(32);
 742			wrmsrl_amd_safe(0xc001100d, value);
 743		}
 744	}
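	/*
	 * (Inferred, the MSR is not publicly documented: 0xc001100d looks
	 * like another CPUID override MSR whose bit 32 gates the
	 * LAHF/SAHF bit, CPUID 0x80000001 ECX[0], so the write above
	 * stops the CPU from advertising a feature it does not have.)
	 */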
 745
 746	if (!c->x86_model_id[0])
 747		strcpy(c->x86_model_id, "Hammer");
 748
 749#ifdef CONFIG_SMP
 750	/*
 751	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 752	 * bit 6 of msr C001_0015
 753	 *
 754	 * Errata 63 for SH-B3 steppings
 755	 * Errata 122 for all steppings (F+ have it disabled by default)
 756	 */
 757	msr_set_bit(MSR_K7_HWCR, 6);
 758#endif
 759	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
 760}
 761
 762static void init_amd_gh(struct cpuinfo_x86 *c)
 763{
 764#ifdef CONFIG_MMCONF_FAM10H
 765	/* do this for boot cpu */
 766	if (c == &boot_cpu_data)
 767		check_enable_amd_mmconf_dmi();
 768
 769	fam10h_check_enable_mmcfg();
 770#endif
 771
 772	/*
 773	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
 774	 * is always needed when GART is enabled, even in a kernel which has no
 775	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
 776	 * If it doesn't, we do it here as suggested by the BKDG.
 777	 *
 778	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
 779	 */
 780	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
 781
 782	/*
 783	 * On family 10h BIOS may not have properly enabled WC+ support, causing
 784	 * it to be converted to CD memtype. This may result in performance
 785	 * degradation for certain nested-paging guests. Prevent this conversion
 786	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
 787	 *
 788	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
 789	 * guests on older kvm hosts.
 790	 */
 791	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
 792
 793	if (cpu_has_amd_erratum(c, amd_erratum_383))
 794		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 795}
 796
 797#define MSR_AMD64_DE_CFG	0xC0011029
 798
 799static void init_amd_ln(struct cpuinfo_x86 *c)
 800{
 801	/*
 802	 * Apply erratum 665 fix unconditionally so machines without a BIOS
 803	 * fix work.
 804	 */
 805	msr_set_bit(MSR_AMD64_DE_CFG, 31);
 806}
 807
 808static bool rdrand_force;
 809
 810static int __init rdrand_cmdline(char *str)
 811{
 812	if (!str)
 813		return -EINVAL;
 814
 815	if (!strcmp(str, "force"))
 816		rdrand_force = true;
 817	else
 818		return -EINVAL;
 819
 820	return 0;
 821}
 822early_param("rdrand", rdrand_cmdline);
 823
 824static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
 825{
 826	/*
 827	 * Saving of the MSR used to hide the RDRAND support during
 828	 * suspend/resume is done by arch/x86/power/cpu.c, which is
 829	 * dependent on CONFIG_PM_SLEEP.
 830	 */
 831	if (!IS_ENABLED(CONFIG_PM_SLEEP))
 832		return;
 833
 834	/*
 835	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
 836	 * RDRAND support using the CPUID function directly.
 837	 */
 838	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
 839		return;
 840
 841	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
 842
 843	/*
 844	 * Verify that the CPUID change has occurred in case the kernel is
 845	 * running virtualized and the hypervisor doesn't support the MSR.
 846	 */
 847	if (cpuid_ecx(1) & BIT(30)) {
 848		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
 849		return;
 850	}
 851
 852	clear_cpu_cap(c, X86_FEATURE_RDRAND);
 853	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
 854}
 855
 856static void init_amd_jg(struct cpuinfo_x86 *c)
 857{
 858	/*
 859	 * Some BIOS implementations do not restore proper RDRAND support
 860	 * across suspend and resume. Check on whether to hide the RDRAND
 861	 * instruction support via CPUID.
 862	 */
 863	clear_rdrand_cpuid_bit(c);
 864}
 865
 866static void init_amd_bd(struct cpuinfo_x86 *c)
 867{
 868	u64 value;
 869
 870	/*
 871	 * The way access filter has a performance penalty on some workloads.
 872	 * Disable it on the affected CPUs.
 873	 */
 874	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
 875		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
 876			value |= 0x1E;
 877			wrmsrl_safe(MSR_F15H_IC_CFG, value);
 878		}
 879	}
 880
 881	/*
 882	 * Some BIOS implementations do not restore proper RDRAND support
 883	 * across suspend and resume. Check on whether to hide the RDRAND
 884	 * instruction support via CPUID.
 885	 */
 886	clear_rdrand_cpuid_bit(c);
 887}
 888
 889static void init_amd_zn(struct cpuinfo_x86 *c)
 890{
 891	set_cpu_cap(c, X86_FEATURE_ZEN);
 892
 893#ifdef CONFIG_NUMA
 894	node_reclaim_distance = 32;
 895#endif
 896
 897	/*
 898	 * Fix erratum 1076: CPB feature bit not being set in CPUID.
 899	 * Always set it, except when running under a hypervisor.
 900	 */
 901	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
 902		set_cpu_cap(c, X86_FEATURE_CPB);
 903}
 904
 905static void init_amd(struct cpuinfo_x86 *c)
 906{
 907	early_init_amd(c);
 908
 909	/*
 910	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
 911	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
 912	 */
 913	clear_cpu_cap(c, 0*32+31);
 914
 915	if (c->x86 >= 0x10)
 916		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 917
 918	/* get apicid instead of initial apic id from cpuid */
 919	c->apicid = hard_smp_processor_id();
 920
  921	/* K6s report MCEs but don't actually have all the MSRs */
 922	if (c->x86 < 6)
 923		clear_cpu_cap(c, X86_FEATURE_MCE);
 924
 925	switch (c->x86) {
 926	case 4:    init_amd_k5(c); break;
 927	case 5:    init_amd_k6(c); break;
 928	case 6:	   init_amd_k7(c); break;
 929	case 0xf:  init_amd_k8(c); break;
 930	case 0x10: init_amd_gh(c); break;
 931	case 0x12: init_amd_ln(c); break;
 932	case 0x15: init_amd_bd(c); break;
 933	case 0x16: init_amd_jg(c); break;
 934	case 0x17: init_amd_zn(c); break;
 935	}
 936
 937	/*
 938	 * Enable workaround for FXSAVE leak on CPUs
 939	 * without a XSaveErPtr feature
 940	 */
 941	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
 942		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 943
 944	cpu_detect_cache_sizes(c);
 945
 946	amd_detect_cmp(c);
 947	amd_get_topology(c);
 948	srat_detect_node(c);
 949
 950	init_amd_cacheinfo(c);
 951
 952	if (cpu_has(c, X86_FEATURE_XMM2)) {
 953		/*
 954		 * Use LFENCE for execution serialization.  On families which
 955		 * don't have that MSR, LFENCE is already serializing.
 956		 * msr_set_bit() uses the safe accessors, too, even if the MSR
 957		 * is not present.
 958		 */
 959		msr_set_bit(MSR_F10H_DECFG,
 960			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
 961
 962		/* A serializing LFENCE stops RDTSC speculation */
 963		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 964	}
 965
 966	/*
  967	 * Family 0x12 and above processors keep the APIC timer
  968	 * running even in deep C-states.
 969	 */
 970	if (c->x86 > 0x11)
 971		set_cpu_cap(c, X86_FEATURE_ARAT);
 972
 973	/* 3DNow or LM implies PREFETCHW */
 974	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
 975		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
 976			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
 977
 978	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
 979	if (!cpu_has(c, X86_FEATURE_XENPV))
 980		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
 981}
 982
 983#ifdef CONFIG_X86_32
 984static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 985{
 986	/* AMD errata T13 (order #21922) */
 987	if (c->x86 == 6) {
 988		/* Duron Rev A0 */
 989		if (c->x86_model == 3 && c->x86_stepping == 0)
 990			size = 64;
 991		/* Tbird rev A1/A2 */
 992		if (c->x86_model == 4 &&
 993			(c->x86_stepping == 0 || c->x86_stepping == 1))
 994			size = 256;
 995	}
 996	return size;
 997}
 998#endif
 999
1000static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
1001{
1002	u32 ebx, eax, ecx, edx;
1003	u16 mask = 0xfff;
1004
1005	if (c->x86 < 0xf)
1006		return;
1007
1008	if (c->extended_cpuid_level < 0x80000006)
1009		return;
1010
1011	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
1012
1013	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
1014	tlb_lli_4k[ENTRIES] = ebx & mask;
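	/*
	 * Field layout, per the AMD APM for leaf 0x80000006: EBX[27:16]
	 * is the L2 dTLB 4K entry count and EBX[11:0] the iTLB count;
	 * the 0xfff mask strips the associativity nibble sitting above
	 * each field. EAX carries the same pair for 2M/4M pages and is
	 * decoded below.
	 */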
1015
1016	/*
1017	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
1018	 * characteristics from the CPUID function 0x80000005 instead.
1019	 */
1020	if (c->x86 == 0xf) {
1021		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1022		mask = 0xff;
1023	}
1024
1025	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1026	if (!((eax >> 16) & mask))
1027		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
1028	else
1029		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
1030
1031	/* a 4M entry uses two 2M entries */
1032	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
1033
1034	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1035	if (!(eax & mask)) {
1036		/* Erratum 658 */
1037		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
1038			tlb_lli_2m[ENTRIES] = 1024;
1039		} else {
1040			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1041			tlb_lli_2m[ENTRIES] = eax & 0xff;
1042		}
1043	} else
1044		tlb_lli_2m[ENTRIES] = eax & mask;
1045
1046	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
1047}
1048
1049static const struct cpu_dev amd_cpu_dev = {
1050	.c_vendor	= "AMD",
1051	.c_ident	= { "AuthenticAMD" },
1052#ifdef CONFIG_X86_32
1053	.legacy_models = {
1054		{ .family = 4, .model_names =
1055		  {
1056			  [3] = "486 DX/2",
1057			  [7] = "486 DX/2-WB",
1058			  [8] = "486 DX/4",
1059			  [9] = "486 DX/4-WB",
1060			  [14] = "Am5x86-WT",
1061			  [15] = "Am5x86-WB"
1062		  }
1063		},
1064	},
1065	.legacy_cache_size = amd_size_cache,
1066#endif
1067	.c_early_init   = early_init_amd,
1068	.c_detect_tlb	= cpu_detect_tlb_amd,
1069	.c_bsp_init	= bsp_init_amd,
1070	.c_init		= init_amd,
1071	.c_x86_vendor	= X86_VENDOR_AMD,
1072};
1073
1074cpu_dev_register(amd_cpu_dev);
1075
1076/*
1077 * AMD errata checking
1078 *
1079 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
1080 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
1081 * have an OSVW id assigned, which it takes as first argument. Both take a
1082 * variable number of family-specific model-stepping ranges created by
1083 * AMD_MODEL_RANGE().
1084 *
1085 * Example:
1086 *
1087 * const int amd_erratum_319[] =
1088 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
1089 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
1090 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
1091 */
1092
1093#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
1094#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
1095#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
1096	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
1097#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
1098#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
1099#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
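/*
 * Decoding example: AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf) packs
 * family 0x10 into bits [31:24] and the (model << 4 | stepping) bounds
 * 0x021..0xfff into bits [23:12] and [11:0], the same "ms" format that
 * cpu_has_amd_erratum() below compares against.
 */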
1100
1101static const int amd_erratum_400[] =
1102	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
1103			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
1104
1105static const int amd_erratum_383[] =
1106	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
1107
1108
1109static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
1110{
1111	int osvw_id = *erratum++;
1112	u32 range;
1113	u32 ms;
1114
1115	if (osvw_id >= 0 && osvw_id < 65536 &&
1116	    cpu_has(cpu, X86_FEATURE_OSVW)) {
1117		u64 osvw_len;
1118
1119		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
1120		if (osvw_id < osvw_len) {
1121			u64 osvw_bits;
1122
1123			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
1124			    osvw_bits);
1125			return osvw_bits & (1ULL << (osvw_id & 0x3f));
1126		}
1127	}
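	/*
	 * The indexing above works because OSVW status bits are packed
	 * 64 per MSR starting at MSR_AMD64_OSVW_STATUS: bit osvw_id
	 * lives in MSR (base + osvw_id / 64), at position osvw_id % 64.
	 */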
1128
1129	/* OSVW unavailable or ID unknown, match family-model-stepping range */
1130	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
1131	while ((range = *erratum++))
1132		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
1133		    (ms >= AMD_MODEL_RANGE_START(range)) &&
1134		    (ms <= AMD_MODEL_RANGE_END(range)))
1135			return true;
1136
1137	return false;
1138}
1139
1140void set_dr_addr_mask(unsigned long mask, int dr)
1141{
1142	if (!boot_cpu_has(X86_FEATURE_BPEXT))
1143		return;
1144
1145	switch (dr) {
1146	case 0:
1147		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
1148		break;
1149	case 1:
1150	case 2:
1151	case 3:
1152		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
1153		break;
1154	default:
1155		break;
1156	}
1157}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/export.h>
   3#include <linux/bitops.h>
   4#include <linux/elf.h>
   5#include <linux/mm.h>
   6
   7#include <linux/io.h>
   8#include <linux/sched.h>
   9#include <linux/sched/clock.h>
  10#include <linux/random.h>
  11#include <linux/topology.h>
  12#include <asm/processor.h>
  13#include <asm/apic.h>
  14#include <asm/cacheinfo.h>
  15#include <asm/cpu.h>
  16#include <asm/spec-ctrl.h>
  17#include <asm/smp.h>
  18#include <asm/numa.h>
  19#include <asm/pci-direct.h>
  20#include <asm/delay.h>
  21#include <asm/debugreg.h>
  22#include <asm/resctrl.h>
  23
  24#ifdef CONFIG_X86_64
  25# include <asm/mmconfig.h>
  26#endif
  27
  28#include "cpu.h"
  29
  30static const int amd_erratum_383[];
  31static const int amd_erratum_400[];
  32static const int amd_erratum_1054[];
  33static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
  34
  35/*
  36 * nodes_per_socket: Stores the number of nodes per socket.
  37 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
  38 * Node Identifiers[10:8]
  39 */
  40static u32 nodes_per_socket = 1;
  41
  42static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
  43{
  44	u32 gprs[8] = { 0 };
  45	int err;
  46
  47	WARN_ONCE((boot_cpu_data.x86 != 0xf),
  48		  "%s should only be used on K8!\n", __func__);
  49
  50	gprs[1] = msr;
  51	gprs[7] = 0x9c5a203a;
  52
  53	err = rdmsr_safe_regs(gprs);
  54
  55	*p = gprs[0] | ((u64)gprs[2] << 32);
  56
  57	return err;
  58}
  59
  60static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
  61{
  62	u32 gprs[8] = { 0 };
  63
  64	WARN_ONCE((boot_cpu_data.x86 != 0xf),
  65		  "%s should only be used on K8!\n", __func__);
  66
  67	gprs[0] = (u32)val;
  68	gprs[1] = msr;
  69	gprs[2] = val >> 32;
  70	gprs[7] = 0x9c5a203a;
  71
  72	return wrmsr_safe_regs(gprs);
  73}
  74
  75/*
   76 *	B-step AMD K6 processors before B 9730xxxx have hardware bugs that can cause
  77 *	misexecution of code under Linux. Owners of such processors should
  78 *	contact AMD for precise details and a CPU swap.
  79 *
  80 *	See	http://www.multimania.com/poulot/k6bug.html
  81 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
  82 *		(Publication # 21266  Issue Date: August 1998)
  83 *
  84 *	The following test is erm.. interesting. AMD neglected to up
  85 *	the chip setting when fixing the bug but they also tweaked some
  86 *	performance at the same time..
  87 */
  88
  89#ifdef CONFIG_X86_32
  90extern __visible void vide(void);
  91__asm__(".text\n"
  92	".globl vide\n"
  93	".type vide, @function\n"
  94	".align 4\n"
  95	"vide: ret\n");
  96#endif
  97
  98static void init_amd_k5(struct cpuinfo_x86 *c)
  99{
 100#ifdef CONFIG_X86_32
 101/*
 102 * General Systems BIOSen alias the cpu frequency registers
 103 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 104 * drivers subsequently pokes it, and changes the CPU speed.
 105 * Workaround : Remove the unneeded alias.
 106 */
 107#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
 108#define CBAR_ENB	(0x80000000)
 109#define CBAR_KEY	(0X000000CB)
 110	if (c->x86_model == 9 || c->x86_model == 10) {
 111		if (inl(CBAR) & CBAR_ENB)
 112			outl(0 | CBAR_KEY, CBAR);
 113	}
 114#endif
 115}
 116
 117static void init_amd_k6(struct cpuinfo_x86 *c)
 118{
 119#ifdef CONFIG_X86_32
 120	u32 l, h;
 121	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
 122
 123	if (c->x86_model < 6) {
 124		/* Based on AMD doc 20734R - June 2000 */
 125		if (c->x86_model == 0) {
 126			clear_cpu_cap(c, X86_FEATURE_APIC);
 127			set_cpu_cap(c, X86_FEATURE_PGE);
 128		}
 129		return;
 130	}
 131
 132	if (c->x86_model == 6 && c->x86_stepping == 1) {
 133		const int K6_BUG_LOOP = 1000000;
 134		int n;
 135		void (*f_vide)(void);
 136		u64 d, d2;
 137
 138		pr_info("AMD K6 stepping B detected - ");
 139
 140		/*
 141		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
 142		 * calls at the same time.
 143		 */
 144
 145		n = K6_BUG_LOOP;
 146		f_vide = vide;
 147		OPTIMIZER_HIDE_VAR(f_vide);
 148		d = rdtsc();
 149		while (n--)
 150			f_vide();
 151		d2 = rdtsc();
 152		d = d2-d;
 153
 154		if (d > 20*K6_BUG_LOOP)
 155			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
 156		else
 157			pr_cont("probably OK (after B9730xxxx).\n");
 158	}
 159
 160	/* K6 with old style WHCR */
 161	if (c->x86_model < 8 ||
 162	   (c->x86_model == 8 && c->x86_stepping < 8)) {
 163		/* We can only write allocate on the low 508Mb */
 164		if (mbytes > 508)
 165			mbytes = 508;
 166
 167		rdmsr(MSR_K6_WHCR, l, h);
 168		if ((l&0x0000FFFF) == 0) {
 169			unsigned long flags;
 170			l = (1<<0)|((mbytes/4)<<1);
 171			local_irq_save(flags);
 172			wbinvd();
 173			wrmsr(MSR_K6_WHCR, l, h);
 174			local_irq_restore(flags);
 175			pr_info("Enabling old style K6 write allocation for %d Mb\n",
 176				mbytes);
 177		}
 178		return;
 179	}
 180
 181	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
 182	     c->x86_model == 9 || c->x86_model == 13) {
 183		/* The more serious chips .. */
 184
 185		if (mbytes > 4092)
 186			mbytes = 4092;
 187
 188		rdmsr(MSR_K6_WHCR, l, h);
 189		if ((l&0xFFFF0000) == 0) {
 190			unsigned long flags;
 191			l = ((mbytes>>2)<<22)|(1<<16);
 192			local_irq_save(flags);
 193			wbinvd();
 194			wrmsr(MSR_K6_WHCR, l, h);
 195			local_irq_restore(flags);
 196			pr_info("Enabling new style K6 write allocation for %d Mb\n",
 197				mbytes);
 198		}
 199
 200		return;
 201	}
 202
 203	if (c->x86_model == 10) {
 204		/* AMD Geode LX is model 10 */
 205		/* placeholder for any needed mods */
 206		return;
 207	}
 208#endif
 209}
 210
 211static void init_amd_k7(struct cpuinfo_x86 *c)
 212{
 213#ifdef CONFIG_X86_32
 214	u32 l, h;
 215
 216	/*
  217	 * Bit 15 of the Athlon-specific MSR 15 needs to be 0
  218	 * to enable SSE on Palomino/Morgan/Barton CPUs.
 219	 * If the BIOS didn't enable it already, enable it here.
 220	 */
 221	if (c->x86_model >= 6 && c->x86_model <= 10) {
 222		if (!cpu_has(c, X86_FEATURE_XMM)) {
 223			pr_info("Enabling disabled K7/SSE Support.\n");
 224			msr_clear_bit(MSR_K7_HWCR, 15);
 225			set_cpu_cap(c, X86_FEATURE_XMM);
 226		}
 227	}
 228
 229	/*
 230	 * It's been determined by AMD that Athlons since model 8 stepping 1
 231	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
 232	 * As per AMD technical note 27212 0.2
 233	 */
 234	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
 235		rdmsr(MSR_K7_CLK_CTL, l, h);
 236		if ((l & 0xfff00000) != 0x20000000) {
 237			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
 238				l, ((l & 0x000fffff)|0x20000000));
 239			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
 240		}
 241	}
 242
  243	/* Are we being called from identify_secondary_cpu()? */
 244	if (!c->cpu_index)
 245		return;
 246
 247	/*
 248	 * Certain Athlons might work (for various values of 'work') in SMP
 249	 * but they are not certified as MP capable.
 250	 */
 251	/* Athlon 660/661 is valid. */
 252	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
 253	    (c->x86_stepping == 1)))
 254		return;
 255
 256	/* Duron 670 is valid */
 257	if ((c->x86_model == 7) && (c->x86_stepping == 0))
 258		return;
 259
 260	/*
  261	 * Athlon 662, Duron 671, and Athlons above model 7 have the MP
  262	 * capability bit. It's worth noting that the A5 stepping (662)
  263	 * of some Athlon XPs has the MP bit set.
 264	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
 265	 * more.
 266	 */
 267	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
 268	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
 269	     (c->x86_model > 7))
 270		if (cpu_has(c, X86_FEATURE_MP))
 271			return;
 272
 273	/* If we get here, not a certified SMP capable AMD system. */
 274
 275	/*
 276	 * Don't taint if we are running SMP kernel on a single non-MP
 277	 * approved Athlon
 278	 */
 279	WARN_ONCE(1, "WARNING: This combination of AMD"
 280		" processors is not suitable for SMP.\n");
 281	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
 282#endif
 283}
 284
 285#ifdef CONFIG_NUMA
 286/*
  287 * To work around a broken NUMA config.  Read the comment in
 288 * srat_detect_node().
 289 */
 290static int nearby_node(int apicid)
 291{
 292	int i, node;
 293
 294	for (i = apicid - 1; i >= 0; i--) {
 295		node = __apicid_to_node[i];
 296		if (node != NUMA_NO_NODE && node_online(node))
 297			return node;
 298	}
 299	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
 300		node = __apicid_to_node[i];
 301		if (node != NUMA_NO_NODE && node_online(node))
 302			return node;
 303	}
 304	return first_node(node_online_map); /* Shouldn't happen */
 305}
 306#endif
 307
 308/*
 309 * Fix up cpu_core_id for pre-F17h systems to be in the
 310 * [0 .. cores_per_node - 1] range. Not really needed but
 311 * kept so as not to break existing setups.
 312 */
 313static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
 314{
 315	u32 cus_per_node;
 316
 317	if (c->x86 >= 0x17)
 318		return;
 319
 320	cus_per_node = c->x86_max_cores / nodes_per_socket;
 321	c->cpu_core_id %= cus_per_node;
 322}
 323
 324/*
 325 * Fixup core topology information for
 326 * (1) AMD multi-node processors
 327 *     Assumption: Number of cores in each internal node is the same.
 328 * (2) AMD processors supporting compute units
 329 */
 330static void amd_get_topology(struct cpuinfo_x86 *c)
 331{
 332	int cpu = smp_processor_id();
 333
 334	/* get information required for multi-node processors */
 335	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 336		int err;
 337		u32 eax, ebx, ecx, edx;
 338
 339		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
 340
 341		c->cpu_die_id  = ecx & 0xff;
 342
 343		if (c->x86 == 0x15)
 344			c->cu_id = ebx & 0xff;
 345
 346		if (c->x86 >= 0x17) {
 347			c->cpu_core_id = ebx & 0xff;
 348
 349			if (smp_num_siblings > 1)
 350				c->x86_max_cores /= smp_num_siblings;
 351		}
 352
 353		/*
 354		 * In case leaf B is available, use it to derive
 355		 * topology information.
 356		 */
 357		err = detect_extended_topology(c);
 358		if (!err)
 359			c->x86_coreid_bits = get_count_order(c->x86_max_cores);
 360
 361		cacheinfo_amd_init_llc_id(c, cpu);
 362
 363	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
 364		u64 value;
 365
 366		rdmsrl(MSR_FAM10H_NODE_ID, value);
 367		c->cpu_die_id = value & 7;
 368
 369		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
 370	} else
 371		return;
 372
 373	if (nodes_per_socket > 1) {
 374		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
 375		legacy_fixup_core_id(c);
 376	}
 377}
 378
 379/*
  380 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 381 * Assumes number of cores is a power of two.
 382 */
 383static void amd_detect_cmp(struct cpuinfo_x86 *c)
 384{
 385	unsigned bits;
 386	int cpu = smp_processor_id();
 387
 388	bits = c->x86_coreid_bits;
 389	/* Low order bits define the core id (index of core in socket) */
 390	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
 391	/* Convert the initial APIC ID into the socket ID */
 392	c->phys_proc_id = c->initial_apicid >> bits;
 393	/* use socket ID also for last level cache */
 394	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
 395}
 396
 397static void amd_detect_ppin(struct cpuinfo_x86 *c)
 398{
 399	unsigned long long val;
 400
 401	if (!cpu_has(c, X86_FEATURE_AMD_PPIN))
 402		return;
 403
 404	/* When PPIN is defined in CPUID, still need to check PPIN_CTL MSR */
 405	if (rdmsrl_safe(MSR_AMD_PPIN_CTL, &val))
 406		goto clear_ppin;
 407
 408	/* PPIN is locked in disabled mode, clear feature bit */
 409	if ((val & 3UL) == 1UL)
 410		goto clear_ppin;
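	/*
	 * (Bit meanings as used here: PPIN_CTL bit 0 is LockOut and bit 1
	 * is the PPIN enable. A value ending in 01b therefore means the
	 * enable is locked at zero, the PPIN can never be read and the
	 * feature bit is dropped; otherwise the code below may still be
	 * able to flip the enable on.)
	 */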
 411
 412	/* If PPIN is disabled, try to enable it */
 413	if (!(val & 2UL)) {
 414		wrmsrl_safe(MSR_AMD_PPIN_CTL,  val | 2UL);
 415		rdmsrl_safe(MSR_AMD_PPIN_CTL, &val);
 416	}
 417
 418	/* If PPIN_EN bit is 1, return from here; otherwise fall through */
 419	if (val & 2UL)
 420		return;
 421
 422clear_ppin:
 423	clear_cpu_cap(c, X86_FEATURE_AMD_PPIN);
 424}
 425
 426u32 amd_get_nodes_per_socket(void)
 427{
 428	return nodes_per_socket;
 429}
 430EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
 431
 432static void srat_detect_node(struct cpuinfo_x86 *c)
 433{
 434#ifdef CONFIG_NUMA
 435	int cpu = smp_processor_id();
 436	int node;
 437	unsigned apicid = c->apicid;
 438
 439	node = numa_cpu_node(cpu);
 440	if (node == NUMA_NO_NODE)
 441		node = per_cpu(cpu_llc_id, cpu);
 442
 443	/*
 444	 * On multi-fabric platform (e.g. Numascale NumaChip) a
 445	 * platform-specific handler needs to be called to fixup some
 446	 * IDs of the CPU.
 447	 */
 448	if (x86_cpuinit.fixup_cpu_id)
 449		x86_cpuinit.fixup_cpu_id(c, node);
 450
 451	if (!node_online(node)) {
 452		/*
 453		 * Two possibilities here:
 454		 *
 455		 * - The CPU is missing memory and no node was created.  In
 456		 *   that case try picking one from a nearby CPU.
 457		 *
 458		 * - The APIC IDs differ from the HyperTransport node IDs
 459		 *   which the K8 northbridge parsing fills in.  Assume
 460		 *   they are all increased by a constant offset, but in
 461		 *   the same order as the HT nodeids.  If that doesn't
 462		 *   result in a usable node fall back to the path for the
 463		 *   previous case.
 464		 *
 465		 * This workaround operates directly on the mapping between
 466		 * APIC ID and NUMA node, assuming certain relationship
 467		 * between APIC ID, HT node ID and NUMA topology.  As going
 468		 * through CPU mapping may alter the outcome, directly
 469		 * access __apicid_to_node[].
 470		 */
 471		int ht_nodeid = c->initial_apicid;
 472
 473		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
 474			node = __apicid_to_node[ht_nodeid];
 475		/* Pick a nearby node */
 476		if (!node_online(node))
 477			node = nearby_node(apicid);
 478	}
 479	numa_set_node(cpu, node);
 480#endif
 481}
 482
 483static void early_init_amd_mc(struct cpuinfo_x86 *c)
 484{
 485#ifdef CONFIG_SMP
 486	unsigned bits, ecx;
 487
 488	/* Multi core CPU? */
 489	if (c->extended_cpuid_level < 0x80000008)
 490		return;
 491
 492	ecx = cpuid_ecx(0x80000008);
 493
 494	c->x86_max_cores = (ecx & 0xff) + 1;
 495
 496	/* CPU telling us the core id bits shift? */
 497	bits = (ecx >> 12) & 0xF;
 498
 499	/* Otherwise recompute */
 500	if (bits == 0) {
 501		while ((1 << bits) < c->x86_max_cores)
 502			bits++;
 503	}
 504
 505	c->x86_coreid_bits = bits;
 506#endif
 507}
 508
 509static void bsp_init_amd(struct cpuinfo_x86 *c)
 510{
 511	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 512
 513		if (c->x86 > 0x10 ||
 514		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
 515			u64 val;
 516
 517			rdmsrl(MSR_K7_HWCR, val);
 518			if (!(val & BIT(24)))
 519				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
 520		}
 521	}
 522
 523	if (c->x86 == 0x15) {
 524		unsigned long upperbit;
 525		u32 cpuid, assoc;
 526
 527		cpuid	 = cpuid_edx(0x80000005);
 528		assoc	 = cpuid >> 16 & 0xff;
 529		upperbit = ((cpuid >> 24) << 10) / assoc;
 530
 531		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
 532		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
 533
 534		/* A random value per boot for bit slice [12:upper_bit) */
 535		va_align.bits = get_random_int() & va_align.mask;
 536	}
 537
 538	if (cpu_has(c, X86_FEATURE_MWAITX))
 539		use_mwaitx_delay();
 540
 541	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 542		u32 ecx;
 543
 544		ecx = cpuid_ecx(0x8000001e);
 545		__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
 546	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
 547		u64 value;
 548
 549		rdmsrl(MSR_FAM10H_NODE_ID, value);
 550		__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
 551	}
 552
 553	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
 554	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
 555	    c->x86 >= 0x15 && c->x86 <= 0x17) {
 556		unsigned int bit;
 557
 558		switch (c->x86) {
 559		case 0x15: bit = 54; break;
 560		case 0x16: bit = 33; break;
 561		case 0x17: bit = 10; break;
 562		default: return;
 563		}
 564		/*
 565		 * Try to cache the base value so further operations can
 566		 * avoid RMW. If that faults, do not enable SSBD.
 567		 */
 568		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
 569			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
 570			setup_force_cpu_cap(X86_FEATURE_SSBD);
 571			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
 572		}
 573	}
 574
 575	resctrl_cpu_detect(c);
 576}
 577
 578static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 579{
 580	u64 msr;
 581
 582	/*
 583	 * BIOS support is required for SME and SEV.
 584	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
 585	 *	      the SME physical address space reduction value.
 586	 *	      If BIOS has not enabled SME then don't advertise the
 587	 *	      SME feature (set in scattered.c).
 588	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
 589	 *            SEV and SEV_ES feature (set in scattered.c).
 590	 *
 591	 *   In all cases, since support for SME and SEV requires long mode,
 592	 *   don't advertise the feature under CONFIG_X86_32.
 593	 */
 594	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
 595		/* Check if memory encryption is enabled */
 596		rdmsrl(MSR_AMD64_SYSCFG, msr);
 597		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
 598			goto clear_all;
 599
 600		/*
 601		 * Always adjust physical address bits. Even though this
 602		 * will be a value above 32-bits this is still done for
 603		 * CONFIG_X86_32 so that accurate values are reported.
 604		 */
 605		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
 606
 607		if (IS_ENABLED(CONFIG_X86_32))
 608			goto clear_all;
 609
 610		rdmsrl(MSR_K7_HWCR, msr);
 611		if (!(msr & MSR_K7_HWCR_SMMLOCK))
 612			goto clear_sev;
 613
 614		return;
 615
 616clear_all:
 617		setup_clear_cpu_cap(X86_FEATURE_SME);
 618clear_sev:
 619		setup_clear_cpu_cap(X86_FEATURE_SEV);
 620		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
 621	}
 622}
 623
 624static void early_init_amd(struct cpuinfo_x86 *c)
 625{
 626	u64 value;
 627	u32 dummy;
 628
 629	early_init_amd_mc(c);
 630
 631	if (c->x86 >= 0xf)
 632		set_cpu_cap(c, X86_FEATURE_K8);
 633
 634	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 635
 636	/*
 637	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
 638	 * with P/T states and does not stop in deep C-states
 639	 */
 640	if (c->x86_power & (1 << 8)) {
 641		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 642		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 643	}
 644
 645	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
 646	if (c->x86_power & BIT(12))
 647		set_cpu_cap(c, X86_FEATURE_ACC_POWER);
 648
 649	/* Bit 14 indicates the Runtime Average Power Limit interface. */
 650	if (c->x86_power & BIT(14))
 651		set_cpu_cap(c, X86_FEATURE_RAPL);
 652
 653#ifdef CONFIG_X86_64
 654	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
 655#else
 656	/*  Set MTRR capability flag if appropriate */
 657	if (c->x86 == 5)
 658		if (c->x86_model == 13 || c->x86_model == 9 ||
 659		    (c->x86_model == 8 && c->x86_stepping >= 8))
 660			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 661#endif
 662#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
 663	/*
 664	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
 665	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
 666	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
 667	 * after 16h.
 668	 */
 669	if (boot_cpu_has(X86_FEATURE_APIC)) {
 670		if (c->x86 > 0x16)
 671			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 672		else if (c->x86 >= 0xf) {
 673			/* check CPU config space for extended APIC ID */
 674			unsigned int val;
 675
 676			val = read_pci_config(0, 24, 0, 0x68);
 677			if ((val >> 17 & 0x3) == 0x3)
 678				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 679		}
 680	}
 681#endif
 682
 683	/*
 684	 * This is only needed to tell the kernel whether to use VMCALL
 685	 * and VMMCALL.  VMMCALL is never executed except under virt, so
 686	 * we can set it unconditionally.
 687	 */
 688	set_cpu_cap(c, X86_FEATURE_VMMCALL);
 689
 690	/* F16h erratum 793, CVE-2013-6885 */
 691	if (c->x86 == 0x16 && c->x86_model <= 0xf)
 692		msr_set_bit(MSR_AMD64_LS_CFG, 15);
 693
 694	/*
 695	 * Check whether the machine is affected by erratum 400. This is
 696	 * used to select the proper idle routine and to enable the check
 697	 * whether the machine is affected in arch_post_acpi_init(), which
 698	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
 699	 */
 700	if (cpu_has_amd_erratum(c, amd_erratum_400))
 701		set_cpu_bug(c, X86_BUG_AMD_E400);
 702
 703	early_detect_mem_encrypt(c);
 704
 705	/* Re-enable TopologyExtensions if switched off by BIOS */
 706	if (c->x86 == 0x15 &&
 707	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
 708	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 709
 710		if (msr_set_bit(0xc0011005, 54) > 0) {
 711			rdmsrl(0xc0011005, value);
 712			if (value & BIT_64(54)) {
 713				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
 714				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
 715			}
 716		}
 717	}
 718
 719	if (cpu_has(c, X86_FEATURE_TOPOEXT))
 720		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
 721}
 722
 723static void init_amd_k8(struct cpuinfo_x86 *c)
 724{
 725	u32 level;
 726	u64 value;
 727
 728	/* On C+ stepping K8 rep microcode works well for copy/memset */
 729	level = cpuid_eax(1);
 730	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 731		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 732
 733	/*
 734	 * Some BIOSes incorrectly force this feature, but only K8 revision D
 735	 * (model = 0x14) and later actually support it.
 736	 * (AMD Erratum #110, docId: 25759).
 737	 */
 738	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
 739		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
 740		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
 741			value &= ~BIT_64(32);
 742			wrmsrl_amd_safe(0xc001100d, value);
 743		}
 744	}
 745
 746	if (!c->x86_model_id[0])
 747		strcpy(c->x86_model_id, "Hammer");
 748
 749#ifdef CONFIG_SMP
 750	/*
 751	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 752	 * bit 6 of msr C001_0015
 753	 *
 754	 * Errata 63 for SH-B3 steppings
 755	 * Errata 122 for all steppings (F+ have it disabled by default)
 756	 */
 757	msr_set_bit(MSR_K7_HWCR, 6);
 758#endif
 759	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
 760}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h the BIOS may not have properly enabled WC+ support,
	 * causing it to be converted to CD memtype. This may result in
	 * performance degradation for certain nested-paging guests.
	 * Prevent this conversion by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP KVM
	 * guests on older KVM hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG	0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);
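
/*
 * Usage note (illustrative): booting with "rdrand=force" on the kernel
 * command line sets rdrand_force above and keeps RDRAND advertised even
 * on systems whose BIOS may not restore it across suspend, e.g.:
 *
 *	linux ... rdrand=force
 */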

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}
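
/*
 * For illustration only: MSR_AMD64_CPUID_FN_1 overrides what CPUID
 * function 1 reports. As we read it, the high half of the MSR maps to
 * ECX, so MSR bit 62 corresponds to CPUID.01H:ECX[30], the RDRAND bit
 * tested above. A hypothetical spelling of that mapping (names and the
 * ECX-in-high-half layout are our assumptions, not AMD's):
 */
#define CPUID_FN_1_ECX_SHIFT	32	/* assumption: ECX in MSR bits 63:32 */
#define RDRAND_CPUID_ECX_BIT	30	/* CPUID.01H:ECX[30] == RDRAND */
#define RDRAND_OVERRIDE_BIT	(CPUID_FN_1_ECX_SHIFT + RDRAND_CPUID_ECX_BIT)	/* == 62 */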

static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The instruction cache's way access filter has a performance
	 * penalty on some workloads. Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}
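
/*
 * For illustration only: the 0x1E mask above covers MSR_F15H_IC_CFG
 * bits 4:1; setting all four disables the way access filter, and the
 * code only writes them while all four are still clear. The same mask
 * built up bit by bit (the macro name is ours, not AMD's):
 */
#define IC_CFG_DIS_WAF_MASK	(BIT_64(1) | BIT_64(2) | BIT_64(3) | BIT_64(4))	/* == 0x1E */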

static void init_amd_zn(struct cpuinfo_x86 *c)
{
	set_cpu_cap(c, X86_FEATURE_ZEN);

#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif

	/*
	 * Fix erratum 1076: CPB feature bit not being set in CPUID.
	 * Always set it, except when running under a hypervisor.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
		set_cpu_cap(c, X86_FEATURE_CPB);
}
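
/*
 * For illustration only: CPB is Core Performance Boost. The erratum fixed
 * above concerns the CPUID bit, not boost itself, which stays under BIOS/MSR
 * control via HWCR.CpbDis - to our knowledge bit 25 of MSR_K7_HWCR. A
 * hypothetical check, hedged accordingly:
 */
static inline bool amd_boost_disabled(void)
{
	u64 value;

	if (rdmsrl_safe(MSR_K7_HWCR, &value))
		return false;
	return value & BIT_64(25);	/* HWCR.CpbDis (assumption) */
}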

static void init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	case 0x17: fallthrough;
	case 0x19: init_amd_zn(c); break;
	}

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	amd_detect_cmp(c);
	amd_get_topology(c);
	srat_detect_node(c);
	amd_detect_ppin(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_F10H_DECFG,
			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have the APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	/*
	 * Turn on the Instructions Retired free counter on machines not
	 * susceptible to erratum #1054 "Instructions Retired Performance
	 * Counter May Be Inaccurate".
	 */
	if (cpu_has(c, X86_FEATURE_IRPERF) &&
	    !cpu_has_amd_erratum(c, amd_erratum_1054))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
}
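
/*
 * For illustration only: with X86_FEATURE_LFENCE_RDTSC set above, RDTSC can
 * be ordered with a plain LFENCE. A minimal sketch of such a fenced read;
 * the real kernel uses rdtsc_ordered() with alternatives, whereas this
 * hypothetical helper hard-codes the LFENCE variant:
 */
static __always_inline u64 rdtsc_lfence(void)
{
	u32 lo, hi;

	/* LFENCE is serializing here, so RDTSC cannot execute early */
	asm volatile("lfence; rdtsc" : "=a" (lo), "=d" (hi));
	return lo | ((u64)hi << 32);
}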

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD erratum T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB, so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
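
/*
 * For illustration only: the EBX layout consumed above. In CPUID
 * Fn8000_0006, EBX[27:16] is the L2 DTLB 4K entry count and EBX[11:0]
 * the L2 ITLB 4K entry count (12-bit fields, hence mask = 0xfff). A
 * worked decode of a sample value:
 *
 *	ebx = 0x04008040
 *	(ebx >> 16) & 0xfff  ==  0x400  ==  1024 DTLB 4K entries
 *	ebx & 0xfff          ==  0x040  ==    64 ITLB 4K entries
 */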

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as its first argument. Both take
 * a variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
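
/*
 * For illustration only: a worked decode of the packing above, using the
 * erratum-1054 range defined below. AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)
 * evaluates to
 *
 *	(0x17 << 24) | (0 << 16) | (0 << 12) | (0x2f << 4) | 0xf == 0x170002ff
 *
 * so AMD_MODEL_RANGE_FAMILY() extracts 0x17 and the model-stepping window
 * is [0x000, 0x2ff]. A family-0x17 CPU with model 0x18, stepping 1 yields
 * ms == (0x18 << 4) | 1 == 0x181, which falls inside the range.
 */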

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			    osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}
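
/*
 * For illustration only: the DR1-DR3 address-mask MSRs are contiguous,
 * while DR0's sits at a separate address, hence the two cases above.
 * The arithmetic resolves as:
 *
 *	dr == 1  ->  MSR_F16H_DR1_ADDR_MASK + 0
 *	dr == 2  ->  MSR_F16H_DR1_ADDR_MASK + 1
 *	dr == 3  ->  MSR_F16H_DR1_ADDR_MASK + 2
 */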

u32 amd_get_highest_perf(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
		return 166;

	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
		return 166;

	return 255;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);
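
/*
 * For illustration only: callers such as the cpufreq drivers use this value
 * as the numerator when turning a CPPC perf reading into a boost ratio. A
 * minimal sketch under that assumption (the helper name and the x1000
 * fixed-point scaling are ours, not the kernel's):
 */
static inline u64 amd_boost_ratio_x1000(u32 nominal_perf)
{
	/* guard against a bogus zero reading */
	if (!nominal_perf)
		return 0;

	/* scale by 1000 to stay in integer math; div_u64 avoids 64/64 division */
	return div_u64(amd_get_highest_perf() * 1000ULL, nominal_perf);
}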