v4.17
   1#include <linux/export.h>
   2#include <linux/bitops.h>
   3#include <linux/elf.h>
   4#include <linux/mm.h>
   5
   6#include <linux/io.h>
   7#include <linux/sched.h>
   8#include <linux/sched/clock.h>
   9#include <linux/random.h>
  10#include <asm/processor.h>
  11#include <asm/apic.h>
  12#include <asm/cpu.h>
  13#include <asm/spec-ctrl.h>
  14#include <asm/smp.h>
  15#include <asm/pci-direct.h>
  16#include <asm/delay.h>
  17
  18#ifdef CONFIG_X86_64
  19# include <asm/mmconfig.h>
  20# include <asm/set_memory.h>
  21#endif
  22
  23#include "cpu.h"
  24
  25static const int amd_erratum_383[];
  26static const int amd_erratum_400[];
  27static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
  28
  29/*
  30 * nodes_per_socket: Stores the number of nodes per socket.
  31 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
  32 * Node Identifiers[10:8]
  33 */
  34static u32 nodes_per_socket = 1;
  35
  36static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
  37{
  38	u32 gprs[8] = { 0 };
  39	int err;
  40
  41	WARN_ONCE((boot_cpu_data.x86 != 0xf),
  42		  "%s should only be used on K8!\n", __func__);
  43
  44	gprs[1] = msr;
  45	gprs[7] = 0x9c5a203a;
  46
  47	err = rdmsr_safe_regs(gprs);
  48
  49	*p = gprs[0] | ((u64)gprs[2] << 32);
  50
  51	return err;
  52}
  53
  54static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
  55{
  56	u32 gprs[8] = { 0 };
  57
  58	WARN_ONCE((boot_cpu_data.x86 != 0xf),
  59		  "%s should only be used on K8!\n", __func__);
  60
  61	gprs[0] = (u32)val;
  62	gprs[1] = msr;
  63	gprs[2] = val >> 32;
  64	gprs[7] = 0x9c5a203a;
  65
  66	return wrmsr_safe_regs(gprs);
  67}
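/*
 * A note on gprs[] above: rdmsr_safe_regs()/wrmsr_safe_regs() consume the
 * array roughly as {eax, ecx, edx, ebx, <unused>, ebp, esi, edi}, so
 * gprs[1] is the MSR number in ECX, gprs[0]/gprs[2] are the low/high MSR
 * halves in EAX:EDX, and gprs[7] places the 0x9c5a203a key in EDI, which
 * these K8-only MSRs require before they can be read or written.
 */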
  68
  69/*
  70 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
  71 *	misexecution of code under Linux. Owners of such processors should
  72 *	contact AMD for precise details and a CPU swap.
  73 *
  74 *	See	http://www.multimania.com/poulot/k6bug.html
  75 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
  76 *		(Publication # 21266  Issue Date: August 1998)
  77 *
  78 *	The following test is erm.. interesting. AMD neglected to up
  79 *	the chip setting when fixing the bug but they also tweaked some
  80 *	performance at the same time..
  81 */
  82
  83extern __visible void vide(void);
  84__asm__(".globl vide\n"
  85	".type vide, @function\n"
  86	".align 4\n"
  87	"vide: ret\n");
  88
  89static void init_amd_k5(struct cpuinfo_x86 *c)
  90{
  91#ifdef CONFIG_X86_32
  92/*
  93 * General Systems BIOSen alias the cpu frequency registers
  94 * of the Elan at 0x000df000. Unfortunately, one of the Linux
  95 * drivers subsequently pokes it, and changes the CPU speed.
  96 * Workaround : Remove the unneeded alias.
  97 */
  98#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
  99#define CBAR_ENB	(0x80000000)
 100#define CBAR_KEY	(0X000000CB)
 101	if (c->x86_model == 9 || c->x86_model == 10) {
 102		if (inl(CBAR) & CBAR_ENB)
 103			outl(0 | CBAR_KEY, CBAR);
 104	}
 105#endif
 106}
 107
 108static void init_amd_k6(struct cpuinfo_x86 *c)
 109{
 110#ifdef CONFIG_X86_32
 111	u32 l, h;
 112	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
 113
 114	if (c->x86_model < 6) {
 115		/* Based on AMD doc 20734R - June 2000 */
 116		if (c->x86_model == 0) {
 117			clear_cpu_cap(c, X86_FEATURE_APIC);
 118			set_cpu_cap(c, X86_FEATURE_PGE);
 119		}
 120		return;
 121	}
 122
 123	if (c->x86_model == 6 && c->x86_stepping == 1) {
 124		const int K6_BUG_LOOP = 1000000;
 125		int n;
 126		void (*f_vide)(void);
 127		u64 d, d2;
 128
 129		pr_info("AMD K6 stepping B detected - ");
 130
 131		/*
 132		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
 133		 * calls at the same time.
 134		 */
 135
 136		n = K6_BUG_LOOP;
 137		f_vide = vide;
 138		OPTIMIZER_HIDE_VAR(f_vide);
 139		d = rdtsc();
 140		while (n--)
 141			f_vide();
 142		d2 = rdtsc();
 143		d = d2-d;
 144
 145		if (d > 20*K6_BUG_LOOP)
 146			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
 147		else
 148			pr_cont("probably OK (after B9730xxxx).\n");
 149	}
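	/*
	 * Rough reading of the timing test above: a fixed (post-B9730xxxx)
	 * part retires the million vide() calls in well under 20 TSC ticks
	 * each, so d > 20*K6_BUG_LOOP is taken as evidence of a buggy chip.
	 */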
 150
 151	/* K6 with old style WHCR */
 152	if (c->x86_model < 8 ||
 153	   (c->x86_model == 8 && c->x86_stepping < 8)) {
 154		/* We can only write allocate on the low 508Mb */
 155		if (mbytes > 508)
 156			mbytes = 508;
 157
 158		rdmsr(MSR_K6_WHCR, l, h);
 159		if ((l&0x0000FFFF) == 0) {
 160			unsigned long flags;
 161			l = (1<<0)|((mbytes/4)<<1);
 162			local_irq_save(flags);
 163			wbinvd();
 164			wrmsr(MSR_K6_WHCR, l, h);
 165			local_irq_restore(flags);
 166			pr_info("Enabling old style K6 write allocation for %d Mb\n",
 167				mbytes);
 168		}
 169		return;
 170	}
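	/*
	 * Worked example of the old-style WHCR value built above: bit 0
	 * enables write allocation and the field starting at bit 1 holds the
	 * limit in 4 MB units, so mbytes = 508 gives
	 * l = (1 << 0) | (127 << 1) = 0xff.
	 */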
 171
 172	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
 173	     c->x86_model == 9 || c->x86_model == 13) {
 174		/* The more serious chips .. */
 175
 176		if (mbytes > 4092)
 177			mbytes = 4092;
 178
 179		rdmsr(MSR_K6_WHCR, l, h);
 180		if ((l&0xFFFF0000) == 0) {
 181			unsigned long flags;
 182			l = ((mbytes>>2)<<22)|(1<<16);
 183			local_irq_save(flags);
 184			wbinvd();
 185			wrmsr(MSR_K6_WHCR, l, h);
 186			local_irq_restore(flags);
 187			pr_info("Enabling new style K6 write allocation for %d Mb\n",
 188				mbytes);
 189		}
 190
 191		return;
 192	}
 193
 194	if (c->x86_model == 10) {
 195		/* AMD Geode LX is model 10 */
 196		/* placeholder for any needed mods */
 197		return;
 198	}
 199#endif
 200}
 201
 202static void init_amd_k7(struct cpuinfo_x86 *c)
 203{
 204#ifdef CONFIG_X86_32
 205	u32 l, h;
 206
 207	/*
 208	 * Bit 15 of Athlon specific MSR 15, needs to be 0
 209	 * to enable SSE on Palomino/Morgan/Barton CPU's.
 210	 * If the BIOS didn't enable it already, enable it here.
 211	 */
 212	if (c->x86_model >= 6 && c->x86_model <= 10) {
 213		if (!cpu_has(c, X86_FEATURE_XMM)) {
 214			pr_info("Enabling disabled K7/SSE Support.\n");
 215			msr_clear_bit(MSR_K7_HWCR, 15);
 216			set_cpu_cap(c, X86_FEATURE_XMM);
 217		}
 218	}
 219
 220	/*
 221	 * It's been determined by AMD that Athlons since model 8 stepping 1
 222	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
 223	 * As per AMD technical note 27212 0.2
 224	 */
 225	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
 226		rdmsr(MSR_K7_CLK_CTL, l, h);
 227		if ((l & 0xfff00000) != 0x20000000) {
 228			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
 229				l, ((l & 0x000fffff)|0x20000000));
 230			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
 231		}
 232	}
 233
 234	set_cpu_cap(c, X86_FEATURE_K7);
 235
  236	/* Is this being called from identify_secondary_cpu()? */
 237	if (!c->cpu_index)
 238		return;
 239
 240	/*
 241	 * Certain Athlons might work (for various values of 'work') in SMP
 242	 * but they are not certified as MP capable.
 243	 */
 244	/* Athlon 660/661 is valid. */
 245	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
 246	    (c->x86_stepping == 1)))
 247		return;
 248
 249	/* Duron 670 is valid */
 250	if ((c->x86_model == 7) && (c->x86_stepping == 0))
 251		return;
 252
 253	/*
 254	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
 255	 * bit. It's worth noting that the A5 stepping (662) of some
 256	 * Athlon XP's have the MP bit set.
 257	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
 258	 * more.
 259	 */
 260	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
 261	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
 262	     (c->x86_model > 7))
 263		if (cpu_has(c, X86_FEATURE_MP))
 264			return;
 265
 266	/* If we get here, not a certified SMP capable AMD system. */
 267
 268	/*
 269	 * Don't taint if we are running SMP kernel on a single non-MP
 270	 * approved Athlon
 271	 */
 272	WARN_ONCE(1, "WARNING: This combination of AMD"
 273		" processors is not suitable for SMP.\n");
 274	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
 275#endif
 276}
 277
 278#ifdef CONFIG_NUMA
 279/*
 280 * To workaround broken NUMA config.  Read the comment in
 281 * srat_detect_node().
 282 */
 283static int nearby_node(int apicid)
 284{
 285	int i, node;
 286
 287	for (i = apicid - 1; i >= 0; i--) {
 288		node = __apicid_to_node[i];
 289		if (node != NUMA_NO_NODE && node_online(node))
 290			return node;
 291	}
 292	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
 293		node = __apicid_to_node[i];
 294		if (node != NUMA_NO_NODE && node_online(node))
 295			return node;
 296	}
 297	return first_node(node_online_map); /* Shouldn't happen */
 298}
 299#endif
 300
 301#ifdef CONFIG_SMP
 302/*
 303 * Fix up cpu_core_id for pre-F17h systems to be in the
 304 * [0 .. cores_per_node - 1] range. Not really needed but
 305 * kept so as not to break existing setups.
 306 */
 307static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
 308{
 309	u32 cus_per_node;
 310
 311	if (c->x86 >= 0x17)
 312		return;
 313
 314	cus_per_node = c->x86_max_cores / nodes_per_socket;
 315	c->cpu_core_id %= cus_per_node;
 316}
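/*
 * Example of the fixup above, with made-up numbers: a pre-F17h part with
 * x86_max_cores = 8 and nodes_per_socket = 2 gets cus_per_node = 4, so the
 * hardware core ids 4..7 on the second node collapse back to 0..3.
 */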
 317
 318/*
 319 * Fixup core topology information for
 320 * (1) AMD multi-node processors
 321 *     Assumption: Number of cores in each internal node is the same.
 322 * (2) AMD processors supporting compute units
 323 */
 324static void amd_get_topology(struct cpuinfo_x86 *c)
 325{
 326	u8 node_id;
 327	int cpu = smp_processor_id();
 328
 329	/* get information required for multi-node processors */
 330	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 331		u32 eax, ebx, ecx, edx;
 332
 333		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
 334
 335		node_id  = ecx & 0xff;
 336		smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
 337
 338		if (c->x86 == 0x15)
 339			c->cu_id = ebx & 0xff;
 340
 341		if (c->x86 >= 0x17) {
 342			c->cpu_core_id = ebx & 0xff;
 343
 344			if (smp_num_siblings > 1)
 345				c->x86_max_cores /= smp_num_siblings;
 346		}
 347
 348		/*
 349		 * We may have multiple LLCs if L3 caches exist, so check if we
 350		 * have an L3 cache by looking at the L3 cache CPUID leaf.
 351		 */
 352		if (cpuid_edx(0x80000006)) {
 353			if (c->x86 == 0x17) {
 354				/*
 355				 * LLC is at the core complex level.
 356				 * Core complex id is ApicId[3].
 357				 */
 358				per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
 359			} else {
 360				/* LLC is at the node level. */
 361				per_cpu(cpu_llc_id, cpu) = node_id;
 362			}
 363		}
 364	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
 365		u64 value;
 366
 367		rdmsrl(MSR_FAM10H_NODE_ID, value);
 368		node_id = value & 7;
 369
 370		per_cpu(cpu_llc_id, cpu) = node_id;
 371	} else
 372		return;
 373
 374	if (nodes_per_socket > 1) {
 375		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
 376		legacy_fixup_core_id(c);
 377	}
 378}
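/*
 * Illustration of the F17h path above (hypothetical Zen part): CPUID
 * 0x8000001e with EBX[15:8] = 1 means two threads per core, halving
 * x86_max_cores, and with ApicId[3] selecting the core complex an APIC ID
 * of 0b1011 is assigned LLC (CCX) id 1.
 */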
 379#endif
 380
 381/*
 382 * On an AMD dual-core setup, the lower bits of the APIC ID distinguish the cores.
 383 * Assumes number of cores is a power of two.
 384 */
 385static void amd_detect_cmp(struct cpuinfo_x86 *c)
 386{
 387#ifdef CONFIG_SMP
 388	unsigned bits;
 389	int cpu = smp_processor_id();
 390
 391	bits = c->x86_coreid_bits;
 392	/* Low order bits define the core id (index of core in socket) */
 393	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
 394	/* Convert the initial APIC ID into the socket ID */
 395	c->phys_proc_id = c->initial_apicid >> bits;
 396	/* use socket ID also for last level cache */
 397	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
 398	amd_get_topology(c);
 399#endif
 400}
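/*
 * Example of the decomposition above: with x86_coreid_bits = 3 and an
 * initial APIC ID of 0b11010, the core id is the low three bits
 * (0b010 = 2) and the socket id is the rest (0b11 = 3).
 */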
 401
 402u16 amd_get_nb_id(int cpu)
 403{
 404	u16 id = 0;
 405#ifdef CONFIG_SMP
 406	id = per_cpu(cpu_llc_id, cpu);
 407#endif
 408	return id;
 409}
 410EXPORT_SYMBOL_GPL(amd_get_nb_id);
 411
 412u32 amd_get_nodes_per_socket(void)
 413{
 414	return nodes_per_socket;
 415}
 416EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
 417
 418static void srat_detect_node(struct cpuinfo_x86 *c)
 419{
 420#ifdef CONFIG_NUMA
 421	int cpu = smp_processor_id();
 422	int node;
 423	unsigned apicid = c->apicid;
 424
 425	node = numa_cpu_node(cpu);
 426	if (node == NUMA_NO_NODE)
 427		node = per_cpu(cpu_llc_id, cpu);
 428
 429	/*
 430	 * On multi-fabric platform (e.g. Numascale NumaChip) a
 431	 * platform-specific handler needs to be called to fixup some
 432	 * IDs of the CPU.
 433	 */
 434	if (x86_cpuinit.fixup_cpu_id)
 435		x86_cpuinit.fixup_cpu_id(c, node);
 436
 437	if (!node_online(node)) {
 438		/*
 439		 * Two possibilities here:
 440		 *
 441		 * - The CPU is missing memory and no node was created.  In
 442		 *   that case try picking one from a nearby CPU.
 443		 *
 444		 * - The APIC IDs differ from the HyperTransport node IDs
 445		 *   which the K8 northbridge parsing fills in.  Assume
 446		 *   they are all increased by a constant offset, but in
 447		 *   the same order as the HT nodeids.  If that doesn't
 448		 *   result in a usable node fall back to the path for the
 449		 *   previous case.
 450		 *
 451		 * This workaround operates directly on the mapping between
 452		 * APIC ID and NUMA node, assuming certain relationship
 453		 * between APIC ID, HT node ID and NUMA topology.  As going
 454		 * through CPU mapping may alter the outcome, directly
 455		 * access __apicid_to_node[].
 456		 */
 457		int ht_nodeid = c->initial_apicid;
 458
 459		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
 460			node = __apicid_to_node[ht_nodeid];
 461		/* Pick a nearby node */
 462		if (!node_online(node))
 463			node = nearby_node(apicid);
 464	}
 465	numa_set_node(cpu, node);
 466#endif
 467}
 468
 469static void early_init_amd_mc(struct cpuinfo_x86 *c)
 470{
 471#ifdef CONFIG_SMP
 472	unsigned bits, ecx;
 473
 474	/* Multi core CPU? */
 475	if (c->extended_cpuid_level < 0x80000008)
 476		return;
 477
 478	ecx = cpuid_ecx(0x80000008);
 479
 480	c->x86_max_cores = (ecx & 0xff) + 1;
 481
 482	/* CPU telling us the core id bits shift? */
 483	bits = (ecx >> 12) & 0xF;
 484
 485	/* Otherwise recompute */
 486	if (bits == 0) {
 487		while ((1 << bits) < c->x86_max_cores)
 488			bits++;
 489	}
 490
 491	c->x86_coreid_bits = bits;
 492#endif
 493}
 494
 495static void bsp_init_amd(struct cpuinfo_x86 *c)
 496{
 497
 498#ifdef CONFIG_X86_64
 499	if (c->x86 >= 0xf) {
 500		unsigned long long tseg;
 501
 502		/*
 503		 * Split up direct mapping around the TSEG SMM area.
 504		 * Don't do it for gbpages because there seems very little
 505		 * benefit in doing so.
 506		 */
 507		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
 508			unsigned long pfn = tseg >> PAGE_SHIFT;
 509
 510			pr_debug("tseg: %010llx\n", tseg);
 511			if (pfn_range_is_mapped(pfn, pfn + 1))
 512				set_memory_4k((unsigned long)__va(tseg), 1);
 513		}
 514	}
 515#endif
 516
 517	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 518
 519		if (c->x86 > 0x10 ||
 520		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
 521			u64 val;
 522
 523			rdmsrl(MSR_K7_HWCR, val);
 524			if (!(val & BIT(24)))
 525				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
 526		}
 527	}
 528
 529	if (c->x86 == 0x15) {
 530		unsigned long upperbit;
 531		u32 cpuid, assoc;
 532
 533		cpuid	 = cpuid_edx(0x80000005);
 534		assoc	 = cpuid >> 16 & 0xff;
 535		upperbit = ((cpuid >> 24) << 10) / assoc;
 536
 537		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
 538		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
 539
 540		/* A random value per boot for bit slice [12:upper_bit) */
 541		va_align.bits = get_random_int() & va_align.mask;
 542	}
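	/*
	 * Sketch of the math above with illustrative CPUID values: EDX of
	 * leaf 0x80000005 reports the L1I size in KB in bits 31:24 and its
	 * associativity in bits 23:16, so a 64K, 2-way cache yields
	 * upperbit = (64 << 10) / 2 = 32768 and va_align.mask = 0x7000,
	 * i.e. bits [14:12] of mmap addresses get the per-boot random value.
	 */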
 543
 544	if (cpu_has(c, X86_FEATURE_MWAITX))
 545		use_mwaitx_delay();
 546
 547	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 548		u32 ecx;
 549
 550		ecx = cpuid_ecx(0x8000001e);
 551		nodes_per_socket = ((ecx >> 8) & 7) + 1;
 552	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
 553		u64 value;
 554
 555		rdmsrl(MSR_FAM10H_NODE_ID, value);
 556		nodes_per_socket = ((value >> 3) & 7) + 1;
 557	}
 558
 559	if (c->x86 >= 0x15 && c->x86 <= 0x17) {
 560		unsigned int bit;
 561
 562		switch (c->x86) {
 563		case 0x15: bit = 54; break;
 564		case 0x16: bit = 33; break;
 565		case 0x17: bit = 10; break;
 566		default: return;
 567		}
 568		/*
 569		 * Try to cache the base value so further operations can
 570		 * avoid RMW. If that faults, do not enable SSBD.
 571		 */
 572		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
 573			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
 574			setup_force_cpu_cap(X86_FEATURE_SSBD);
 575			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
 576		}
 577	}
 578}
 579
 580static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 581{
 582	u64 msr;
 583
 584	/*
 585	 * BIOS support is required for SME and SEV.
 586	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
 587	 *	      the SME physical address space reduction value.
 588	 *	      If BIOS has not enabled SME then don't advertise the
 589	 *	      SME feature (set in scattered.c).
 590	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
 591	 *            SEV feature (set in scattered.c).
 592	 *
 593	 *   In all cases, since support for SME and SEV requires long mode,
 594	 *   don't advertise the feature under CONFIG_X86_32.
 595	 */
 596	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
 597		/* Check if memory encryption is enabled */
 598		rdmsrl(MSR_K8_SYSCFG, msr);
 599		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
 600			goto clear_all;
 601
 602		/*
 603		 * Always adjust physical address bits. Even though this
 604		 * will be a value above 32-bits this is still done for
 605		 * CONFIG_X86_32 so that accurate values are reported.
 606		 */
 607		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
 608
 609		if (IS_ENABLED(CONFIG_X86_32))
 610			goto clear_all;
 611
 612		rdmsrl(MSR_K7_HWCR, msr);
 613		if (!(msr & MSR_K7_HWCR_SMMLOCK))
 614			goto clear_sev;
 615
 616		return;
 617
 618clear_all:
 619		clear_cpu_cap(c, X86_FEATURE_SME);
 620clear_sev:
 621		clear_cpu_cap(c, X86_FEATURE_SEV);
 622	}
 623}
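/*
 * Example of the adjustment above (illustrative value): with SME enabled
 * and CPUID 0x8000001f reporting 5 in EBX[11:6], a 48-bit physical address
 * space shrinks to 43 usable bits, since the high address bits are
 * repurposed while memory encryption is active.
 */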
 624
 625static void early_init_amd(struct cpuinfo_x86 *c)
 626{
 627	u32 dummy;
 628
 629	early_init_amd_mc(c);
 630
 631	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 632
 633	/*
 634	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
 635	 * with P/T states and does not stop in deep C-states
 636	 */
 637	if (c->x86_power & (1 << 8)) {
 638		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 639		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 640	}
 641
 642	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
 643	if (c->x86_power & BIT(12))
 644		set_cpu_cap(c, X86_FEATURE_ACC_POWER);
 645
 646#ifdef CONFIG_X86_64
 647	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
 648#else
 649	/*  Set MTRR capability flag if appropriate */
 650	if (c->x86 == 5)
 651		if (c->x86_model == 13 || c->x86_model == 9 ||
 652		    (c->x86_model == 8 && c->x86_stepping >= 8))
 653			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 654#endif
 655#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
 656	/*
 657	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
 658	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
 659	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
 660	 * after 16h.
 661	 */
 662	if (boot_cpu_has(X86_FEATURE_APIC)) {
 663		if (c->x86 > 0x16)
 664			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 665		else if (c->x86 >= 0xf) {
 666			/* check CPU config space for extended APIC ID */
 667			unsigned int val;
 668
 669			val = read_pci_config(0, 24, 0, 0x68);
 670			if ((val >> 17 & 0x3) == 0x3)
 671				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
 672		}
 673	}
 674#endif
 675
 676	/*
 677	 * This is only needed to tell the kernel whether to use VMCALL
 678	 * and VMMCALL.  VMMCALL is never executed except under virt, so
 679	 * we can set it unconditionally.
 680	 */
 681	set_cpu_cap(c, X86_FEATURE_VMMCALL);
 682
 683	/* F16h erratum 793, CVE-2013-6885 */
 684	if (c->x86 == 0x16 && c->x86_model <= 0xf)
 685		msr_set_bit(MSR_AMD64_LS_CFG, 15);
 686
 687	/*
 688	 * Check whether the machine is affected by erratum 400. This is
 689	 * used to select the proper idle routine and to enable the check
 690	 * whether the machine is affected in arch_post_acpi_init(), which
 691	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
 692	 */
 693	if (cpu_has_amd_erratum(c, amd_erratum_400))
 694		set_cpu_bug(c, X86_BUG_AMD_E400);
 695
 696	early_detect_mem_encrypt(c);
 697}
 698
 699static void init_amd_k8(struct cpuinfo_x86 *c)
 700{
 701	u32 level;
 702	u64 value;
 703
 704	/* On C+ stepping K8 rep microcode works well for copy/memset */
 705	level = cpuid_eax(1);
 706	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 707		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 708
 709	/*
 710	 * Some BIOSes incorrectly force this feature, but only K8 revision D
 711	 * (model = 0x14) and later actually support it.
 712	 * (AMD Erratum #110, docId: 25759).
 713	 */
 714	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
 715		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
 716		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
 717			value &= ~BIT_64(32);
 718			wrmsrl_amd_safe(0xc001100d, value);
 719		}
 720	}
 721
 722	if (!c->x86_model_id[0])
 723		strcpy(c->x86_model_id, "Hammer");
 724
 725#ifdef CONFIG_SMP
 726	/*
 727	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 728	 * bit 6 of msr C001_0015
 729	 *
 730	 * Errata 63 for SH-B3 steppings
 731	 * Errata 122 for all steppings (F+ have it disabled by default)
 732	 */
 733	msr_set_bit(MSR_K7_HWCR, 6);
 734#endif
 735	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
 736}
 737
 738static void init_amd_gh(struct cpuinfo_x86 *c)
 739{
 740#ifdef CONFIG_MMCONF_FAM10H
 741	/* do this for boot cpu */
 742	if (c == &boot_cpu_data)
 743		check_enable_amd_mmconf_dmi();
 744
 745	fam10h_check_enable_mmcfg();
 746#endif
 747
 748	/*
 749	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
 750	 * is always needed when GART is enabled, even in a kernel which has no
 751	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
 752	 * If it doesn't, we do it here as suggested by the BKDG.
 753	 *
 754	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
 755	 */
 756	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
 757
 758	/*
 759	 * On family 10h BIOS may not have properly enabled WC+ support, causing
 760	 * it to be converted to CD memtype. This may result in performance
 761	 * degradation for certain nested-paging guests. Prevent this conversion
 762	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
 763	 *
 764	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
 765	 * guests on older kvm hosts.
 766	 */
 767	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
 768
 769	if (cpu_has_amd_erratum(c, amd_erratum_383))
 770		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 771}
 772
 773#define MSR_AMD64_DE_CFG	0xC0011029
 774
 775static void init_amd_ln(struct cpuinfo_x86 *c)
 776{
 777	/*
 778	 * Apply erratum 665 fix unconditionally so machines without a BIOS
 779	 * fix work.
 780	 */
 781	msr_set_bit(MSR_AMD64_DE_CFG, 31);
 782}
 783
 784static void init_amd_bd(struct cpuinfo_x86 *c)
 785{
 786	u64 value;
 787
 788	/* re-enable TopologyExtensions if switched off by BIOS */
 789	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
 790	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 791
 792		if (msr_set_bit(0xc0011005, 54) > 0) {
 793			rdmsrl(0xc0011005, value);
 794			if (value & BIT_64(54)) {
 795				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
 796				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
 797			}
 798		}
 799	}
 800
 801	/*
 802	 * The way access filter has a performance penalty on some workloads.
 803	 * Disable it on the affected CPUs.
 804	 */
 805	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
 806		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
 807			value |= 0x1E;
 808			wrmsrl_safe(MSR_F15H_IC_CFG, value);
 809		}
 810	}
 811}
 812
 813static void init_amd_zn(struct cpuinfo_x86 *c)
 814{
 815	set_cpu_cap(c, X86_FEATURE_ZEN);
 816	/*
 817	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
 818	 * all up to and including B1.
 819	 */
 820	if (c->x86_model <= 1 && c->x86_stepping <= 1)
 821		set_cpu_cap(c, X86_FEATURE_CPB);
 822}
 823
 824static void init_amd(struct cpuinfo_x86 *c)
 825{
 826	early_init_amd(c);
 827
 828	/*
 829	 * Bit 31 in normal CPUID is used for the nonstandard 3DNow ID;
 830	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway
 831	 */
 832	clear_cpu_cap(c, 0*32+31);
 833
 834	if (c->x86 >= 0x10)
 835		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 836
 837	/* get apicid instead of initial apic id from cpuid */
 838	c->apicid = hard_smp_processor_id();
 839
 840	/* K6s report MCEs but don't actually have all the MSRs */
 841	if (c->x86 < 6)
 842		clear_cpu_cap(c, X86_FEATURE_MCE);
 843
 844	switch (c->x86) {
 845	case 4:    init_amd_k5(c); break;
 846	case 5:    init_amd_k6(c); break;
 847	case 6:	   init_amd_k7(c); break;
 848	case 0xf:  init_amd_k8(c); break;
 849	case 0x10: init_amd_gh(c); break;
 850	case 0x12: init_amd_ln(c); break;
 851	case 0x15: init_amd_bd(c); break;
 852	case 0x17: init_amd_zn(c); break;
 853	}
 854
 855	/*
 856	 * Enable workaround for FXSAVE leak on CPUs
 857	 * without an XSaveErPtr feature
 858	 */
 859	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
 860		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 861
 862	cpu_detect_cache_sizes(c);
 863
 864	/* Multi core CPU? */
 865	if (c->extended_cpuid_level >= 0x80000008) {
 866		amd_detect_cmp(c);
 867		srat_detect_node(c);
 868	}
 869
 870#ifdef CONFIG_X86_32
 871	detect_ht(c);
 872#endif
 873
 874	init_amd_cacheinfo(c);
 875
 876	if (c->x86 >= 0xf)
 877		set_cpu_cap(c, X86_FEATURE_K8);
 878
 879	if (cpu_has(c, X86_FEATURE_XMM2)) {
 880		unsigned long long val;
 881		int ret;
 882
 883		/*
 884		 * A serializing LFENCE has less overhead than MFENCE, so
 885		 * use it for execution serialization.  On families which
 886		 * don't have that MSR, LFENCE is already serializing.
 887		 * msr_set_bit() uses the safe accessors, too, even if the MSR
 888		 * is not present.
 889		 */
 890		msr_set_bit(MSR_F10H_DECFG,
 891			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
 892
 893		/*
 894		 * Verify that the MSR write was successful (could be running
 895		 * under a hypervisor) and only then assume that LFENCE is
 896		 * serializing.
 897		 */
 898		ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
 899		if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
 900			/* A serializing LFENCE stops RDTSC speculation */
 901			set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 902		} else {
 903			/* MFENCE stops RDTSC speculation */
 904			set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 905		}
 906	}
 907
 908	/*
 909	 * Family 0x12 and above processors have an APIC timer
 910	 * running in deep C-states.
 911	 */
 912	if (c->x86 > 0x11)
 913		set_cpu_cap(c, X86_FEATURE_ARAT);
 914
 915	/* 3DNow or LM implies PREFETCHW */
 916	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
 917		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
 918			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
 919
 920	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
 921	if (!cpu_has(c, X86_FEATURE_XENPV))
 922		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
 923}
 924
 925#ifdef CONFIG_X86_32
 926static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 927{
 928	/* AMD errata T13 (order #21922) */
 929	if ((c->x86 == 6)) {
 930		/* Duron Rev A0 */
 931		if (c->x86_model == 3 && c->x86_stepping == 0)
 932			size = 64;
 933		/* Tbird rev A1/A2 */
 934		if (c->x86_model == 4 &&
 935			(c->x86_stepping == 0 || c->x86_stepping == 1))
 936			size = 256;
 937	}
 938	return size;
 939}
 940#endif
 941
 942static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
 943{
 944	u32 ebx, eax, ecx, edx;
 945	u16 mask = 0xfff;
 946
 947	if (c->x86 < 0xf)
 948		return;
 949
 950	if (c->extended_cpuid_level < 0x80000006)
 951		return;
 952
 953	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
 954
 955	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
 956	tlb_lli_4k[ENTRIES] = ebx & mask;
 957
 958	/*
 959	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
 960	 * characteristics from the CPUID function 0x80000005 instead.
 961	 */
 962	if (c->x86 == 0xf) {
 963		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
 964		mask = 0xff;
 965	}
 966
 967	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
 968	if (!((eax >> 16) & mask))
 969		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
 970	else
 971		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
 972
 973	/* a 4M entry uses two 2M entries */
 974	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
 975
 976	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
 977	if (!(eax & mask)) {
 978		/* Erratum 658 */
 979		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
 980			tlb_lli_2m[ENTRIES] = 1024;
 981		} else {
 982			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
 983			tlb_lli_2m[ENTRIES] = eax & 0xff;
 984		}
 985	} else
 986		tlb_lli_2m[ENTRIES] = eax & mask;
 987
 988	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
 989}
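/*
 * Decode example for the fields above, with a made-up register value: if
 * leaf 0x80000006 returned EBX = 0x64006400, the 12-bit entry-count fields
 * would give 1024 (0x400) 4K entries for both the L2 DTLB (bits 27:16) and
 * the L2 ITLB (bits 11:0); the top four bits of each half encode the
 * associativity.
 */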
 990
 991static const struct cpu_dev amd_cpu_dev = {
 992	.c_vendor	= "AMD",
 993	.c_ident	= { "AuthenticAMD" },
 994#ifdef CONFIG_X86_32
 995	.legacy_models = {
 996		{ .family = 4, .model_names =
 997		  {
 998			  [3] = "486 DX/2",
 999			  [7] = "486 DX/2-WB",
1000			  [8] = "486 DX/4",
1001			  [9] = "486 DX/4-WB",
1002			  [14] = "Am5x86-WT",
1003			  [15] = "Am5x86-WB"
1004		  }
1005		},
1006	},
1007	.legacy_cache_size = amd_size_cache,
1008#endif
1009	.c_early_init   = early_init_amd,
1010	.c_detect_tlb	= cpu_detect_tlb_amd,
1011	.c_bsp_init	= bsp_init_amd,
1012	.c_init		= init_amd,
1013	.c_x86_vendor	= X86_VENDOR_AMD,
1014};
1015
1016cpu_dev_register(amd_cpu_dev);
1017
1018/*
1019 * AMD errata checking
1020 *
1021 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
1022 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
1023 * have an OSVW id assigned, which it takes as first argument. Both take a
1024 * variable number of family-specific model-stepping ranges created by
1025 * AMD_MODEL_RANGE().
1026 *
1027 * Example:
1028 *
1029 * const int amd_erratum_319[] =
1030 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
1031 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
1032 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
1033 */
1034
1035#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
1036#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
1037#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
1038	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
1039#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
1040#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
1041#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
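/*
 * Worked example of the packing: AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf)
 * evaluates to (0xf << 24) | (0x41 << 16) | (0x2 << 12) | (0xff << 4) | 0xf
 * = 0x0f412fff, i.e. FAMILY = 0xf, START = 0x412, END = 0xfff. A family
 * 0xf CPU with model 0x41, stepping 2 produces ms = 0x412 and so matches.
 */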
1042
1043static const int amd_erratum_400[] =
1044	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
1045			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
1046
1047static const int amd_erratum_383[] =
1048	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
1049
1050
1051static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
1052{
1053	int osvw_id = *erratum++;
1054	u32 range;
1055	u32 ms;
1056
1057	if (osvw_id >= 0 && osvw_id < 65536 &&
1058	    cpu_has(cpu, X86_FEATURE_OSVW)) {
1059		u64 osvw_len;
1060
1061		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
1062		if (osvw_id < osvw_len) {
1063			u64 osvw_bits;
1064
1065			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
1066			    osvw_bits);
1067			return osvw_bits & (1ULL << (osvw_id & 0x3f));
1068		}
1069	}
1070
1071	/* OSVW unavailable or ID unknown, match family-model-stepping range */
1072	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
1073	while ((range = *erratum++))
1074		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
1075		    (ms >= AMD_MODEL_RANGE_START(range)) &&
1076		    (ms <= AMD_MODEL_RANGE_END(range)))
1077			return true;
1078
1079	return false;
1080}
1081
1082void set_dr_addr_mask(unsigned long mask, int dr)
1083{
1084	if (!boot_cpu_has(X86_FEATURE_BPEXT))
1085		return;
1086
1087	switch (dr) {
1088	case 0:
1089		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
1090		break;
1091	case 1:
1092	case 2:
1093	case 3:
1094		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
1095		break;
1096	default:
1097		break;
1098	}
1099}
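/*
 * The DR1..DR3 address-mask MSRs sit at consecutive addresses, which is
 * what the MSR_F16H_DR1_ADDR_MASK - 1 + dr arithmetic above relies on:
 * dr = 3, for example, lands on MSR_F16H_DR1_ADDR_MASK + 2.
 */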
v3.5.6
  1#include <linux/export.h>
  2#include <linux/init.h>
  3#include <linux/bitops.h>
  4#include <linux/elf.h>
  5#include <linux/mm.h>
  6
  7#include <linux/io.h>
  8#include <linux/sched.h>
  9#include <asm/processor.h>
 10#include <asm/apic.h>
 11#include <asm/cpu.h>
 12#include <asm/pci-direct.h>
 13
 14#ifdef CONFIG_X86_64
 15# include <asm/numa_64.h>
 16# include <asm/mmconfig.h>
 17# include <asm/cacheflush.h>
 18#endif
 19
 20#include "cpu.h"
 21
 22#ifdef CONFIG_X86_32
 23/*
 24 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 25 *	misexecution of code under Linux. Owners of such processors should
 26 *	contact AMD for precise details and a CPU swap.
 27 *
 28 *	See	http://www.multimania.com/poulot/k6bug.html
 29 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 30 *		(Publication # 21266  Issue Date: August 1998)
 31 *
 32 *	The following test is erm.. interesting. AMD neglected to up
 33 *	the chip setting when fixing the bug but they also tweaked some
 34 *	performance at the same time..
 35 */
 36
 37extern void vide(void);
 38__asm__(".align 4\nvide: ret");
 39
 40static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
 41{
 42/*
 43 * General Systems BIOSen alias the cpu frequency registers
 44 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 45 * drivers subsequently pokes it, and changes the CPU speed.
 46 * Workaround : Remove the unneeded alias.
 47 */
 48#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
 49#define CBAR_ENB	(0x80000000)
 50#define CBAR_KEY	(0X000000CB)
 51	if (c->x86_model == 9 || c->x86_model == 10) {
 52		if (inl(CBAR) & CBAR_ENB)
 53			outl(0 | CBAR_KEY, CBAR);
 54	}
 55}
 56
 57
 58static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 59{
 60	u32 l, h;
 61	int mbytes = num_physpages >> (20-PAGE_SHIFT);
 62
 63	if (c->x86_model < 6) {
 64		/* Based on AMD doc 20734R - June 2000 */
 65		if (c->x86_model == 0) {
 66			clear_cpu_cap(c, X86_FEATURE_APIC);
 67			set_cpu_cap(c, X86_FEATURE_PGE);
 68		}
 69		return;
 70	}
 71
 72	if (c->x86_model == 6 && c->x86_mask == 1) {
 73		const int K6_BUG_LOOP = 1000000;
 74		int n;
 75		void (*f_vide)(void);
 76		unsigned long d, d2;
 77
 78		printk(KERN_INFO "AMD K6 stepping B detected - ");
 79
 80		/*
 81		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
 82		 * calls at the same time.
 83		 */
 84
 85		n = K6_BUG_LOOP;
 86		f_vide = vide;
 87		rdtscl(d);
 88		while (n--)
 89			f_vide();
 90		rdtscl(d2);
 91		d = d2-d;
 92
 93		if (d > 20*K6_BUG_LOOP)
 94			printk(KERN_CONT
 95				"system stability may be impaired when more than 32 MB are used.\n");
 96		else
 97			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
 98	}
 99
100	/* K6 with old style WHCR */
101	if (c->x86_model < 8 ||
102	   (c->x86_model == 8 && c->x86_mask < 8)) {
103		/* We can only write allocate on the low 508Mb */
104		if (mbytes > 508)
105			mbytes = 508;
106
107		rdmsr(MSR_K6_WHCR, l, h);
108		if ((l&0x0000FFFF) == 0) {
109			unsigned long flags;
110			l = (1<<0)|((mbytes/4)<<1);
111			local_irq_save(flags);
112			wbinvd();
113			wrmsr(MSR_K6_WHCR, l, h);
114			local_irq_restore(flags);
115			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
116				mbytes);
117		}
118		return;
119	}
120
121	if ((c->x86_model == 8 && c->x86_mask > 7) ||
122	     c->x86_model == 9 || c->x86_model == 13) {
123		/* The more serious chips .. */
124
125		if (mbytes > 4092)
126			mbytes = 4092;
127
128		rdmsr(MSR_K6_WHCR, l, h);
129		if ((l&0xFFFF0000) == 0) {
130			unsigned long flags;
131			l = ((mbytes>>2)<<22)|(1<<16);
132			local_irq_save(flags);
133			wbinvd();
134			wrmsr(MSR_K6_WHCR, l, h);
135			local_irq_restore(flags);
136			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
137				mbytes);
138		}
139
140		return;
141	}
142
143	if (c->x86_model == 10) {
144		/* AMD Geode LX is model 10 */
145		/* placeholder for any needed mods */
146		return;
147	}
148}
149
150static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
151{
 152	/* Is this being called from identify_secondary_cpu()? */
153	if (!c->cpu_index)
154		return;
155
156	/*
157	 * Certain Athlons might work (for various values of 'work') in SMP
158	 * but they are not certified as MP capable.
159	 */
160	/* Athlon 660/661 is valid. */
161	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
162	    (c->x86_mask == 1)))
163		goto valid_k7;
164
165	/* Duron 670 is valid */
166	if ((c->x86_model == 7) && (c->x86_mask == 0))
167		goto valid_k7;
168
169	/*
170	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
171	 * bit. It's worth noting that the A5 stepping (662) of some
172	 * Athlon XP's have the MP bit set.
173	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
174	 * more.
175	 */
176	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
177	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
178	     (c->x86_model > 7))
179		if (cpu_has_mp)
180			goto valid_k7;
181
182	/* If we get here, not a certified SMP capable AMD system. */
183
184	/*
185	 * Don't taint if we are running SMP kernel on a single non-MP
186	 * approved Athlon
187	 */
188	WARN_ONCE(1, "WARNING: This combination of AMD"
189		" processors is not suitable for SMP.\n");
190	if (!test_taint(TAINT_UNSAFE_SMP))
191		add_taint(TAINT_UNSAFE_SMP);
192
193valid_k7:
194	;
195}
196
197static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
198{
199	u32 l, h;
200
201	/*
202	 * Bit 15 of Athlon specific MSR 15, needs to be 0
203	 * to enable SSE on Palomino/Morgan/Barton CPU's.
204	 * If the BIOS didn't enable it already, enable it here.
205	 */
206	if (c->x86_model >= 6 && c->x86_model <= 10) {
207		if (!cpu_has(c, X86_FEATURE_XMM)) {
208			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
209			rdmsr(MSR_K7_HWCR, l, h);
210			l &= ~0x00008000;
211			wrmsr(MSR_K7_HWCR, l, h);
212			set_cpu_cap(c, X86_FEATURE_XMM);
213		}
214	}
215
216	/*
217	 * It's been determined by AMD that Athlons since model 8 stepping 1
218	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
219	 * As per AMD technical note 27212 0.2
220	 */
221	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
222		rdmsr(MSR_K7_CLK_CTL, l, h);
223		if ((l & 0xfff00000) != 0x20000000) {
224			printk(KERN_INFO
225			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
226					l, ((l & 0x000fffff)|0x20000000));
227			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
228		}
229	}
230
231	set_cpu_cap(c, X86_FEATURE_K7);
232
233	amd_k7_smp_check(c);
234}
235#endif
236
237#ifdef CONFIG_NUMA
238/*
239 * To workaround broken NUMA config.  Read the comment in
240 * srat_detect_node().
241 */
242static int __cpuinit nearby_node(int apicid)
243{
244	int i, node;
245
246	for (i = apicid - 1; i >= 0; i--) {
247		node = __apicid_to_node[i];
248		if (node != NUMA_NO_NODE && node_online(node))
249			return node;
250	}
251	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
252		node = __apicid_to_node[i];
253		if (node != NUMA_NO_NODE && node_online(node))
254			return node;
255	}
256	return first_node(node_online_map); /* Shouldn't happen */
257}
258#endif
259
260/*
261 * Fixup core topology information for
262 * (1) AMD multi-node processors
263 *     Assumption: Number of cores in each internal node is the same.
264 * (2) AMD processors supporting compute units
265 */
266#ifdef CONFIG_X86_HT
267static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
268{
269	u32 nodes, cores_per_cu = 1;
270	u8 node_id;
271	int cpu = smp_processor_id();
272
273	/* get information required for multi-node processors */
274	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
275		u32 eax, ebx, ecx, edx;
276
277		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
278		nodes = ((ecx >> 8) & 7) + 1;
279		node_id = ecx & 7;
280
281		/* get compute unit information */
282		smp_num_siblings = ((ebx >> 8) & 3) + 1;
283		c->compute_unit_id = ebx & 0xff;
284		cores_per_cu += ((ebx >> 8) & 3);
285	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
286		u64 value;
287
288		rdmsrl(MSR_FAM10H_NODE_ID, value);
289		nodes = ((value >> 3) & 7) + 1;
290		node_id = value & 7;
291	} else
292		return;
293
294	/* fixup multi-node processor information */
295	if (nodes > 1) {
296		u32 cores_per_node;
297		u32 cus_per_node;
298
299		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
300		cores_per_node = c->x86_max_cores / nodes;
301		cus_per_node = cores_per_node / cores_per_cu;
302
303		/* store NodeID, use llc_shared_map to store sibling info */
304		per_cpu(cpu_llc_id, cpu) = node_id;
305
306		/* core id has to be in the [0 .. cores_per_node - 1] range */
307		c->cpu_core_id %= cores_per_node;
308		c->compute_unit_id %= cus_per_node;
309	}
310}
311#endif
312
313/*
 314 * On an AMD dual-core setup, the lower bits of the APIC ID distinguish the cores.
315 * Assumes number of cores is a power of two.
316 */
317static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
318{
319#ifdef CONFIG_X86_HT
320	unsigned bits;
321	int cpu = smp_processor_id();
322
323	bits = c->x86_coreid_bits;
324	/* Low order bits define the core id (index of core in socket) */
325	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
326	/* Convert the initial APIC ID into the socket ID */
327	c->phys_proc_id = c->initial_apicid >> bits;
328	/* use socket ID also for last level cache */
329	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
330	amd_get_topology(c);
331#endif
332}
333
334int amd_get_nb_id(int cpu)
335{
336	int id = 0;
337#ifdef CONFIG_SMP
338	id = per_cpu(cpu_llc_id, cpu);
339#endif
340	return id;
341}
342EXPORT_SYMBOL_GPL(amd_get_nb_id);
343
344static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
345{
346#ifdef CONFIG_NUMA
347	int cpu = smp_processor_id();
348	int node;
349	unsigned apicid = c->apicid;
350
351	node = numa_cpu_node(cpu);
352	if (node == NUMA_NO_NODE)
353		node = per_cpu(cpu_llc_id, cpu);
354
355	/*
356	 * On multi-fabric platform (e.g. Numascale NumaChip) a
357	 * platform-specific handler needs to be called to fixup some
358	 * IDs of the CPU.
359	 */
360	if (x86_cpuinit.fixup_cpu_id)
361		x86_cpuinit.fixup_cpu_id(c, node);
362
363	if (!node_online(node)) {
364		/*
365		 * Two possibilities here:
366		 *
367		 * - The CPU is missing memory and no node was created.  In
368		 *   that case try picking one from a nearby CPU.
369		 *
370		 * - The APIC IDs differ from the HyperTransport node IDs
371		 *   which the K8 northbridge parsing fills in.  Assume
372		 *   they are all increased by a constant offset, but in
373		 *   the same order as the HT nodeids.  If that doesn't
374		 *   result in a usable node fall back to the path for the
375		 *   previous case.
376		 *
377		 * This workaround operates directly on the mapping between
378		 * APIC ID and NUMA node, assuming certain relationship
379		 * between APIC ID, HT node ID and NUMA topology.  As going
380		 * through CPU mapping may alter the outcome, directly
381		 * access __apicid_to_node[].
382		 */
383		int ht_nodeid = c->initial_apicid;
384
385		if (ht_nodeid >= 0 &&
386		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
387			node = __apicid_to_node[ht_nodeid];
388		/* Pick a nearby node */
389		if (!node_online(node))
390			node = nearby_node(apicid);
391	}
392	numa_set_node(cpu, node);
393#endif
394}
395
396static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
397{
398#ifdef CONFIG_X86_HT
399	unsigned bits, ecx;
400
401	/* Multi core CPU? */
402	if (c->extended_cpuid_level < 0x80000008)
403		return;
404
405	ecx = cpuid_ecx(0x80000008);
406
407	c->x86_max_cores = (ecx & 0xff) + 1;
408
409	/* CPU telling us the core id bits shift? */
410	bits = (ecx >> 12) & 0xF;
411
412	/* Otherwise recompute */
413	if (bits == 0) {
414		while ((1 << bits) < c->x86_max_cores)
415			bits++;
416	}
417
418	c->x86_coreid_bits = bits;
419#endif
420}
421
422static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
423{
424	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
425
426		if (c->x86 > 0x10 ||
427		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
428			u64 val;
429
430			rdmsrl(MSR_K7_HWCR, val);
431			if (!(val & BIT(24)))
432				printk(KERN_WARNING FW_BUG "TSC doesn't count "
433					"with P0 frequency!\n");
434		}
435	}
436
437	if (c->x86 == 0x15) {
438		unsigned long upperbit;
439		u32 cpuid, assoc;
440
441		cpuid	 = cpuid_edx(0x80000005);
442		assoc	 = cpuid >> 16 & 0xff;
443		upperbit = ((cpuid >> 24) << 10) / assoc;
444
445		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
446		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
447	}
448}
449
450static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
451{
452	early_init_amd_mc(c);
453
454	/*
455	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
456	 * with P/T states and does not stop in deep C-states
457	 */
458	if (c->x86_power & (1 << 8)) {
459		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
460		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
461		if (!check_tsc_unstable())
462			sched_clock_stable = 1;
463	}
464
465#ifdef CONFIG_X86_64
466	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
467#else
468	/*  Set MTRR capability flag if appropriate */
469	if (c->x86 == 5)
470		if (c->x86_model == 13 || c->x86_model == 9 ||
471		    (c->x86_model == 8 && c->x86_mask >= 8))
472			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
473#endif
474#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
475	/* check CPU config space for extended APIC ID */
476	if (cpu_has_apic && c->x86 >= 0xf) {
477		unsigned int val;
478		val = read_pci_config(0, 24, 0, 0x68);
479		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
480			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
481	}
482#endif
483}
484
485static void __cpuinit init_amd(struct cpuinfo_x86 *c)
486{
487	u32 dummy;
488
489#ifdef CONFIG_SMP
490	unsigned long long value;
491
492	/*
493	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
494	 * bit 6 of msr C001_0015
495	 *
496	 * Errata 63 for SH-B3 steppings
497	 * Errata 122 for all steppings (F+ have it disabled by default)
498	 */
499	if (c->x86 == 0xf) {
500		rdmsrl(MSR_K7_HWCR, value);
501		value |= 1 << 6;
502		wrmsrl(MSR_K7_HWCR, value);
503	}
504#endif
505
506	early_init_amd(c);
507
508	/*
 509	 * Bit 31 in normal CPUID is used for the nonstandard 3DNow ID;
 510	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway
511	 */
512	clear_cpu_cap(c, 0*32+31);
513
514#ifdef CONFIG_X86_64
515	/* On C+ stepping K8 rep microcode works well for copy/memset */
516	if (c->x86 == 0xf) {
517		u32 level;
518
519		level = cpuid_eax(1);
520		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
521			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
522
523		/*
524		 * Some BIOSes incorrectly force this feature, but only K8
525		 * revision D (model = 0x14) and later actually support it.
526		 * (AMD Erratum #110, docId: 25759).
527		 */
528		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
529			u64 val;
530
531			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
532			if (!rdmsrl_amd_safe(0xc001100d, &val)) {
533				val &= ~(1ULL << 32);
534				wrmsrl_amd_safe(0xc001100d, val);
535			}
536		}
537
538	}
539	if (c->x86 >= 0x10)
540		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
541
542	/* get apicid instead of initial apic id from cpuid */
543	c->apicid = hard_smp_processor_id();
544#else
545
546	/*
547	 *	FIXME: We should handle the K5 here. Set up the write
548	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
549	 *	no bus pipeline)
550	 */
551
552	switch (c->x86) {
553	case 4:
554		init_amd_k5(c);
555		break;
556	case 5:
557		init_amd_k6(c);
558		break;
559	case 6: /* An Athlon/Duron */
560		init_amd_k7(c);
561		break;
562	}
563
 564	/* K6s report MCEs but don't actually have all the MSRs */
565	if (c->x86 < 6)
566		clear_cpu_cap(c, X86_FEATURE_MCE);
567#endif
568
569	/* Enable workaround for FXSAVE leak */
570	if (c->x86 >= 6)
571		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
572
573	if (!c->x86_model_id[0]) {
574		switch (c->x86) {
575		case 0xf:
576			/* Should distinguish Models here, but this is only
 577			   a fallback anyway. */
578			strcpy(c->x86_model_id, "Hammer");
579			break;
580		}
581	}
582
583	/* re-enable TopologyExtensions if switched off by BIOS */
584	if ((c->x86 == 0x15) &&
585	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
586	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
587		u64 val;
588
589		if (!rdmsrl_amd_safe(0xc0011005, &val)) {
590			val |= 1ULL << 54;
591			wrmsrl_amd_safe(0xc0011005, val);
592			rdmsrl(0xc0011005, val);
593			if (val & (1ULL << 54)) {
594				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
595				printk(KERN_INFO FW_INFO "CPU: Re-enabling "
596				  "disabled Topology Extensions Support\n");
597			}
598		}
599	}
600
601	cpu_detect_cache_sizes(c);
602
603	/* Multi core CPU? */
604	if (c->extended_cpuid_level >= 0x80000008) {
605		amd_detect_cmp(c);
606		srat_detect_node(c);
607	}
608
609#ifdef CONFIG_X86_32
610	detect_ht(c);
611#endif
612
613	if (c->extended_cpuid_level >= 0x80000006) {
614		if (cpuid_edx(0x80000006) & 0xf000)
615			num_cache_leaves = 4;
616		else
617			num_cache_leaves = 3;
618	}
619
620	if (c->x86 >= 0xf)
621		set_cpu_cap(c, X86_FEATURE_K8);
622
623	if (cpu_has_xmm2) {
624		/* MFENCE stops RDTSC speculation */
625		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
626	}
627
628#ifdef CONFIG_X86_64
629	if (c->x86 == 0x10) {
630		/* do this for boot cpu */
631		if (c == &boot_cpu_data)
632			check_enable_amd_mmconf_dmi();
633
634		fam10h_check_enable_mmcfg();
635	}
636
637	if (c == &boot_cpu_data && c->x86 >= 0xf) {
638		unsigned long long tseg;
639
640		/*
641		 * Split up direct mapping around the TSEG SMM area.
642		 * Don't do it for gbpages because there seems very little
643		 * benefit in doing so.
644		 */
645		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
646			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
647			if ((tseg>>PMD_SHIFT) <
648				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
649				((tseg>>PMD_SHIFT) <
650				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
651				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
652				set_memory_4k((unsigned long)__va(tseg), 1);
653		}
654	}
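	/*
	 * The PMD_SHIFT comparisons above ask whether TSEG falls inside an
	 * already-mapped part of the direct mapping (below max_low_pfn_mapped,
	 * or at/above 4G and below max_pfn_mapped); only then is splitting
	 * the mapping around it with set_memory_4k() worthwhile.
	 */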
655#endif
656
657	/*
 658	 * Family 0x12 and above processors have an APIC timer
 659	 * running in deep C-states.
660	 */
661	if (c->x86 > 0x11)
662		set_cpu_cap(c, X86_FEATURE_ARAT);
663
664	/*
665	 * Disable GART TLB Walk Errors on Fam10h. We do this here
666	 * because this is always needed when GART is enabled, even in a
667	 * kernel which has no MCE support built in.
668	 */
669	if (c->x86 == 0x10) {
670		/*
 671		 * BIOS should disable GartTlbWlk Errors itself. If it
 672		 * doesn't, do it here as suggested by the BKDG.
673		 *
674		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
675		 */
676		u64 mask;
677		int err;
678
679		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
680		if (err == 0) {
681			mask |= (1 << 10);
682			checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
683		}
684	}
685
686	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
687}
688
689#ifdef CONFIG_X86_32
690static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
691							unsigned int size)
692{
693	/* AMD errata T13 (order #21922) */
694	if ((c->x86 == 6)) {
695		/* Duron Rev A0 */
696		if (c->x86_model == 3 && c->x86_mask == 0)
697			size = 64;
698		/* Tbird rev A1/A2 */
699		if (c->x86_model == 4 &&
700			(c->x86_mask == 0 || c->x86_mask == 1))
701			size = 256;
702	}
703	return size;
704}
705#endif
706
707static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
708	.c_vendor	= "AMD",
709	.c_ident	= { "AuthenticAMD" },
710#ifdef CONFIG_X86_32
711	.c_models = {
712		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
713		  {
714			  [3] = "486 DX/2",
715			  [7] = "486 DX/2-WB",
716			  [8] = "486 DX/4",
717			  [9] = "486 DX/4-WB",
718			  [14] = "Am5x86-WT",
719			  [15] = "Am5x86-WB"
720		  }
721		},
722	},
723	.c_size_cache	= amd_size_cache,
724#endif
725	.c_early_init   = early_init_amd,
726	.c_bsp_init	= bsp_init_amd,
727	.c_init		= init_amd,
728	.c_x86_vendor	= X86_VENDOR_AMD,
729};
730
731cpu_dev_register(amd_cpu_dev);
732
733/*
734 * AMD errata checking
735 *
736 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
737 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
738 * have an OSVW id assigned, which it takes as first argument. Both take a
739 * variable number of family-specific model-stepping ranges created by
740 * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
741 * int[] in arch/x86/include/asm/processor.h.
742 *
743 * Example:
744 *
745 * const int amd_erratum_319[] =
746 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
747 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
748 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
749 */
750
751const int amd_erratum_400[] =
752	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
753			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
754EXPORT_SYMBOL_GPL(amd_erratum_400);
755
756const int amd_erratum_383[] =
757	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
758EXPORT_SYMBOL_GPL(amd_erratum_383);
759
760bool cpu_has_amd_erratum(const int *erratum)
761{
762	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
763	int osvw_id = *erratum++;
764	u32 range;
765	u32 ms;
766
767	/*
768	 * If called early enough that current_cpu_data hasn't been initialized
769	 * yet, fall back to boot_cpu_data.
770	 */
771	if (cpu->x86 == 0)
772		cpu = &boot_cpu_data;
773
774	if (cpu->x86_vendor != X86_VENDOR_AMD)
775		return false;
776
777	if (osvw_id >= 0 && osvw_id < 65536 &&
778	    cpu_has(cpu, X86_FEATURE_OSVW)) {
779		u64 osvw_len;
780
781		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
782		if (osvw_id < osvw_len) {
783			u64 osvw_bits;
784
785			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
786			    osvw_bits);
787			return osvw_bits & (1ULL << (osvw_id & 0x3f));
788		}
789	}
790
791	/* OSVW unavailable or ID unknown, match family-model-stepping range */
792	ms = (cpu->x86_model << 4) | cpu->x86_mask;
793	while ((range = *erratum++))
794		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
795		    (ms >= AMD_MODEL_RANGE_START(range)) &&
796		    (ms <= AMD_MODEL_RANGE_END(range)))
797			return true;
798
799	return false;
800}
801
802EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);