v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   4 * because MTRRs can span up to 40 bits (36 bits on most modern x86)
   5 */
   6
   7#include <linux/export.h>
   8#include <linux/init.h>
   9#include <linux/io.h>
  10#include <linux/mm.h>
  11#include <linux/cc_platform.h>
  12#include <asm/processor-flags.h>
  13#include <asm/cacheinfo.h>
  14#include <asm/cpufeature.h>
  15#include <asm/hypervisor.h>
  16#include <asm/mshyperv.h>
  17#include <asm/tlbflush.h>
  18#include <asm/mtrr.h>
  19#include <asm/msr.h>
  20#include <asm/memtype.h>
  21
  22#include "mtrr.h"
  23
  24struct fixed_range_block {
  25	int base_msr;		/* start address of an MTRR block */
  26	int ranges;		/* number of MTRRs in this block  */
  27};
  28
  29static struct fixed_range_block fixed_range_blocks[] = {
  30	{ MSR_MTRRfix64K_00000, 1 }, /* one   64k MTRR  */
  31	{ MSR_MTRRfix16K_80000, 2 }, /* two   16k MTRRs */
  32	{ MSR_MTRRfix4K_C0000,  8 }, /* eight  4k MTRRs */
  33	{}
  34};
  35
  36struct cache_map {
  37	u64 start;
  38	u64 end;
  39	u64 flags;
  40	u64 type:8;
  41	u64 fixed:1;
  42};
  43
  44bool mtrr_debug;
  45
  46static int __init mtrr_param_setup(char *str)
  47{
  48	int rc = 0;
  49
  50	if (!str)
  51		return -EINVAL;
  52	if (!strcmp(str, "debug"))
  53		mtrr_debug = true;
  54	else
  55		rc = -EINVAL;
  56
  57	return rc;
  58}
  59early_param("mtrr", mtrr_param_setup);
  60
  61/*
  62 * CACHE_MAP_MAX is the maximum number of memory ranges in cache_map, where
  63 * no 2 adjacent ranges have the same cache mode (those would be merged).
  64 * The number is based on the worst case:
  65 * - no two adjacent fixed MTRRs share the same cache mode
  66 * - one variable MTRR is spanning a huge area with mode WB
  67 * - 255 variable MTRRs with mode UC all overlap with the WB MTRR, creating 2
  68 *   additional ranges each (result like "ababababa...aba" with a = WB, b = UC),
  69 *   accounting for MTRR_MAX_VAR_RANGES * 2 - 1 range entries
  70 * - a TOP_MEM2 area (even with overlapping an UC MTRR can't add 2 range entries
  71 *   to the possible maximum, as it always starts at 4GB, thus it can't be in
  72 *   the middle of that MTRR, unless that MTRR starts at 0, which would remove
  73 *   the initial "a" from the "abababa" pattern above)
  74 * The map won't contain ranges with no matching MTRR (those fall back to the
  75 * default cache mode).
  76 */
  77#define CACHE_MAP_MAX	(MTRR_NUM_FIXED_RANGES + MTRR_MAX_VAR_RANGES * 2)
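/*
 * Worked example (illustrative, not part of the file): with the usual
 * MTRR_NUM_FIXED_RANGES of 88 and MTRR_MAX_VAR_RANGES of 256, CACHE_MAP_MAX
 * evaluates to 88 + 256 * 2 = 600 map entries.
 */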
  78
  79static struct cache_map init_cache_map[CACHE_MAP_MAX] __initdata;
  80static struct cache_map *cache_map __refdata = init_cache_map;
  81static unsigned int cache_map_size = CACHE_MAP_MAX;
  82static unsigned int cache_map_n;
  83static unsigned int cache_map_fixed;
  84
  85static unsigned long smp_changes_mask;
  86static int mtrr_state_set;
  87u64 mtrr_tom2;
  88
  89struct mtrr_state_type mtrr_state;
  90EXPORT_SYMBOL_GPL(mtrr_state);
  91
  92/* Reserved bits in the high portion of the MTRRphysBaseN MSR. */
  93u32 phys_hi_rsvd;
  94
  95/*
  96 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
  97 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
  98 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
  99 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 100 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 101 * 0 for operation."
 102 */
 103static inline void k8_check_syscfg_dram_mod_en(void)
 104{
 105	u32 lo, hi;
 106
 107	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
 108	      (boot_cpu_data.x86 >= 0x0f)))
 109		return;
 110
 111	rdmsr(MSR_AMD64_SYSCFG, lo, hi);
 112	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
 113		pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
 114		       " not cleared by BIOS, clearing this bit\n",
 115		       smp_processor_id());
 116		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
 117		mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi);
 118	}
 119}
 120
 121/* Get the size of contiguous MTRR range */
 122static u64 get_mtrr_size(u64 mask)
 123{
 124	u64 size;
 125
 126	mask |= (u64)phys_hi_rsvd << 32;
 127	size = -mask;
 128
 129	return size;
 130}
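/*
 * Worked example (illustrative, assuming 36 physical address bits): a 1 GiB
 * variable MTRR has mask bits 35:30 set, i.e. mask = 0xfc0000000.  OR-ing in
 * the reserved high bits gives 0xffffffffc0000000, so size = -mask =
 * 0x40000000, i.e. 1 GiB.
 */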
 131
 132static u8 get_var_mtrr_state(unsigned int reg, u64 *start, u64 *size)
 133{
 134	struct mtrr_var_range *mtrr = mtrr_state.var_ranges + reg;
 135
 136	if (!(mtrr->mask_lo & MTRR_PHYSMASK_V))
 137		return MTRR_TYPE_INVALID;
 138
 139	*start = (((u64)mtrr->base_hi) << 32) + (mtrr->base_lo & PAGE_MASK);
 140	*size = get_mtrr_size((((u64)mtrr->mask_hi) << 32) +
 141			      (mtrr->mask_lo & PAGE_MASK));
 142
 143	return mtrr->base_lo & MTRR_PHYSBASE_TYPE;
 144}
 145
 146static u8 get_effective_type(u8 type1, u8 type2)
 147{
 148	if (type1 == MTRR_TYPE_UNCACHABLE || type2 == MTRR_TYPE_UNCACHABLE)
 149		return MTRR_TYPE_UNCACHABLE;
 150
 151	if ((type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH) ||
 152	    (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK))
 153		return MTRR_TYPE_WRTHROUGH;
 154
 155	if (type1 != type2)
 156		return MTRR_TYPE_UNCACHABLE;
 157
 158	return type1;
 159}
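/*
 * Illustrative results of the rules above: UC wins over everything
 * (WB + UC -> UC), WB and WT combine to the stricter WT, and any other
 * mismatch (e.g. WC + WB) falls back to UC.
 */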
 160
 161static void rm_map_entry_at(int idx)
 162{
 163	cache_map_n--;
 164	if (cache_map_n > idx) {
 165		memmove(cache_map + idx, cache_map + idx + 1,
 166			sizeof(*cache_map) * (cache_map_n - idx));
 167	}
 168}
 169
 170/*
 171 * Add an entry into cache_map at a specific index.  Merges adjacent entries if
 172 * appropriate.  Return the number of merges for correcting the scan index
 173 * (this is needed as merging will reduce the number of entries, which will
 174 * result in skipping entries in future iterations if the scan index isn't
 175 * corrected).
 176 * Note that the corrected index can never go below -1 (resulting in being 0 in
 177 * the next scan iteration), as "2" is returned only if the current index is
 178 * larger than zero.
 179 */
 180static int add_map_entry_at(u64 start, u64 end, u8 type, int idx)
 181{
 182	bool merge_prev = false, merge_next = false;
 183
 184	if (start >= end)
 185		return 0;
 186
 187	if (idx > 0) {
 188		struct cache_map *prev = cache_map + idx - 1;
 189
 190		if (!prev->fixed && start == prev->end && type == prev->type)
 191			merge_prev = true;
 192	}
 193
 194	if (idx < cache_map_n) {
 195		struct cache_map *next = cache_map + idx;
 196
 197		if (!next->fixed && end == next->start && type == next->type)
 198			merge_next = true;
 199	}
 200
 201	if (merge_prev && merge_next) {
 202		cache_map[idx - 1].end = cache_map[idx].end;
 203		rm_map_entry_at(idx);
 204		return 2;
 205	}
 206	if (merge_prev) {
 207		cache_map[idx - 1].end = end;
 208		return 1;
 209	}
 210	if (merge_next) {
 211		cache_map[idx].start = start;
 212		return 1;
 213	}
 214
 215	/* Sanity check: the array should NEVER be too small! */
 216	if (cache_map_n == cache_map_size) {
 217		WARN(1, "MTRR cache mode memory map exhausted!\n");
 218		cache_map_n = cache_map_fixed;
 219		return 0;
 220	}
 221
 222	if (cache_map_n > idx) {
 223		memmove(cache_map + idx + 1, cache_map + idx,
 224			sizeof(*cache_map) * (cache_map_n - idx));
 225	}
 226
 227	cache_map[idx].start = start;
 228	cache_map[idx].end = end;
 229	cache_map[idx].type = type;
 230	cache_map[idx].fixed = 0;
 231	cache_map_n++;
 232
 233	return 0;
 234}
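/*
 * Illustrative merge cases (editor's sketch): inserting WB [0x1000, 0x2000)
 * at idx 1 next to an existing non-fixed WB entry [0x0, 0x1000) extends that
 * entry to [0x0, 0x2000) and returns 1.  If a non-fixed WB entry
 * [0x2000, 0x3000) also sits at idx 1, all three collapse into [0x0, 0x3000)
 * and 2 is returned.
 */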
 235
 236/* Clear a part of an entry. Return 1 if start of entry is still valid. */
 237static int clr_map_range_at(u64 start, u64 end, int idx)
 238{
 239	int ret = start != cache_map[idx].start;
 240	u64 tmp;
 241
 242	if (start == cache_map[idx].start && end == cache_map[idx].end) {
 243		rm_map_entry_at(idx);
 244	} else if (start == cache_map[idx].start) {
 245		cache_map[idx].start = end;
 246	} else if (end == cache_map[idx].end) {
 247		cache_map[idx].end = start;
 248	} else {
 249		tmp = cache_map[idx].end;
 250		cache_map[idx].end = start;
 251		add_map_entry_at(end, tmp, cache_map[idx].type, idx + 1);
 252	}
 253
 254	return ret;
 255}
 256
 257/*
 258 * Add MTRR to the map.  The current map is scanned and each part of the MTRR
 259 * either overlapping with an existing entry or with a hole in the map is
 260 * handled separately.
 261 */
 262static void add_map_entry(u64 start, u64 end, u8 type)
 263{
 264	u8 new_type, old_type;
 265	u64 tmp;
 266	int i;
 267
 268	for (i = 0; i < cache_map_n && start < end; i++) {
 269		if (start >= cache_map[i].end)
 270			continue;
 271
 272		if (start < cache_map[i].start) {
 273			/* Region start has no overlap. */
 274			tmp = min(end, cache_map[i].start);
 275			i -= add_map_entry_at(start, tmp,  type, i);
 276			start = tmp;
 277			continue;
 278		}
 279
 280		new_type = get_effective_type(type, cache_map[i].type);
 281		old_type = cache_map[i].type;
 282
 283		if (cache_map[i].fixed || new_type == old_type) {
 284			/* Cut off start of new entry. */
 285			start = cache_map[i].end;
 286			continue;
 287		}
 288
 289		/* Handle only overlapping part of region. */
 290		tmp = min(end, cache_map[i].end);
 291		i += clr_map_range_at(start, tmp, i);
 292		i -= add_map_entry_at(start, tmp, new_type, i);
 293		start = tmp;
 294	}
 295
 296	/* Add rest of region after last map entry (rest might be empty). */
 297	add_map_entry_at(start, end, type, i);
 298}
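/*
 * Illustrative split (editor's sketch): with a single WB entry
 * [0x0, 0x100000) in the map, adding a UC MTRR [0x40000, 0x80000) leaves
 * three entries: WB [0x0, 0x40000), UC [0x40000, 0x80000) and
 * WB [0x80000, 0x100000) - the "aba" pattern from the CACHE_MAP_MAX comment.
 */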
 299
 300/* Add variable MTRRs to cache map. */
 301static void map_add_var(void)
 302{
 303	u64 start, size;
 304	unsigned int i;
 305	u8 type;
 306
 307	/*
 308	 * Add AMD TOP_MEM2 area.  Can't be added in mtrr_build_map(), as it
 309	 * needs to be added again when rebuilding the map due to potentially
 310	 * having moved as a result of variable MTRRs for memory below 4GB.
 311	 */
 312	if (mtrr_tom2) {
 313		add_map_entry(BIT_ULL(32), mtrr_tom2, MTRR_TYPE_WRBACK);
 314		cache_map[cache_map_n - 1].fixed = 1;
 315	}
 316
 317	for (i = 0; i < num_var_ranges; i++) {
 318		type = get_var_mtrr_state(i, &start, &size);
 319		if (type != MTRR_TYPE_INVALID)
 320			add_map_entry(start, start + size, type);
 321	}
 322}
 323
 324/*
 325 * Rebuild map by replacing variable entries.  Needs to be called when MTRR
 326 * registers are being changed after boot, as such changes could include
 327 * removals of registers, which are complicated to handle without rebuild of
 328 * the map.
 329 */
 330void generic_rebuild_map(void)
 331{
 332	if (mtrr_if != &generic_mtrr_ops)
 333		return;
 334
 335	cache_map_n = cache_map_fixed;
 336
 337	map_add_var();
 338}
 339
 340static unsigned int __init get_cache_map_size(void)
 341{
 342	return cache_map_fixed + 2 * num_var_ranges + (mtrr_tom2 != 0);
 343}
 344
 345/* Build the cache_map containing the cache modes per memory range. */
 346void __init mtrr_build_map(void)
 347{
 348	u64 start, end, size;
 349	unsigned int i;
 350	u8 type;
 351
 352	/* Add fixed MTRRs, optimize for adjacent entries with same type. */
 353	if (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED) {
 354		/*
 355		 * Start with 64k size fixed entries, preset 1st one (hence the
 356		 * loop below is starting with index 1).
 357		 */
 358		start = 0;
 359		end = size = 0x10000;
 360		type = mtrr_state.fixed_ranges[0];
 361
 362		for (i = 1; i < MTRR_NUM_FIXED_RANGES; i++) {
 363			/* 8 64k entries, then 16 16k ones, rest 4k. */
 364			if (i == 8 || i == 24)
 365				size >>= 2;
 366
 367			if (mtrr_state.fixed_ranges[i] != type) {
 368				add_map_entry(start, end, type);
 369				start = end;
 370				type = mtrr_state.fixed_ranges[i];
 371			}
 372			end += size;
 373		}
 374		add_map_entry(start, end, type);
 375	}
 376
 377	/* Mark fixed, they take precedence. */
 378	for (i = 0; i < cache_map_n; i++)
 379		cache_map[i].fixed = 1;
 380	cache_map_fixed = cache_map_n;
 381
 382	map_add_var();
 383
 384	pr_info("MTRR map: %u entries (%u fixed + %u variable; max %u), built from %u variable MTRRs\n",
 385		cache_map_n, cache_map_fixed, cache_map_n - cache_map_fixed,
 386		get_cache_map_size(), num_var_ranges + (mtrr_tom2 != 0));
 387
 388	if (mtrr_debug) {
 389		for (i = 0; i < cache_map_n; i++) {
 390			pr_info("%3u: %016llx-%016llx %s\n", i,
 391				cache_map[i].start, cache_map[i].end - 1,
 392				mtrr_attrib_to_str(cache_map[i].type));
 393		}
 394	}
 395}
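/*
 * Illustrative fixed-range layout: the 8 64k entries cover 0x00000-0x7ffff,
 * the 16 16k entries cover 0x80000-0xbffff and the 64 4k entries cover
 * 0xc0000-0xfffff, i.e. the fixed MTRRs describe exactly the first 1 MiB.
 */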
 396
 397/* Copy the cache_map from __initdata memory to dynamically allocated one. */
 398void __init mtrr_copy_map(void)
 399{
 400	unsigned int new_size = get_cache_map_size();
 401
 402	if (!mtrr_state.enabled || !new_size) {
 403		cache_map = NULL;
 404		return;
 405	}
 406
 407	mutex_lock(&mtrr_mutex);
 408
 409	cache_map = kcalloc(new_size, sizeof(*cache_map), GFP_KERNEL);
 410	if (cache_map) {
 411		memmove(cache_map, init_cache_map,
 412			cache_map_n * sizeof(*cache_map));
 413		cache_map_size = new_size;
 414	} else {
 415		mtrr_state.enabled = 0;
 416		pr_err("MTRRs disabled due to allocation failure for lookup map.\n");
 417	}
 418
 419	mutex_unlock(&mtrr_mutex);
 420}
 421
 422/**
 423 * mtrr_overwrite_state - set static MTRR state
 424 *
 425 * Used to set MTRR state via different means (e.g. with data obtained from
 426 * a hypervisor).
 427 * Is allowed only for special cases when running virtualized. Must be called
 428 * from the x86_init.hyper.init_platform() hook.  It can be called only once.
 429 * The MTRR state can't be changed afterwards.  To ensure that, X86_FEATURE_MTRR
 430 * is cleared.
 431 *
 432 * @var: MTRR variable range array to use
 433 * @num_var: length of the @var array
 434 * @def_type: default caching type
 435 */
 436void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
 437			  mtrr_type def_type)
 438{
 439	unsigned int i;
 440
 441	/* Only allowed to be called once before mtrr_bp_init(). */
 442	if (WARN_ON_ONCE(mtrr_state_set))
 443		return;
 444
 445	/* Only allowed when running virtualized. */
 446	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
 447		return;
 448
 449	/*
 450	 * Only allowed for special virtualization cases:
 451	 * - when running as Hyper-V, SEV-SNP guest using vTOM
 452	 * - when running as Xen PV guest
 453	 * - when running as SEV-SNP or TDX guest to avoid unnecessary
 454	 *   VMM communication/Virtualization exceptions (#VC, #VE)
 455	 */
 456	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP) &&
 457	    !hv_is_isolation_supported() &&
 458	    !cpu_feature_enabled(X86_FEATURE_XENPV) &&
 459	    !cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
 460		return;
 461
 462	/* Disable MTRR in order to disable MTRR modifications. */
 463	setup_clear_cpu_cap(X86_FEATURE_MTRR);
 464
 465	if (var) {
 466		if (num_var > MTRR_MAX_VAR_RANGES) {
 467			pr_warn("Trying to overwrite MTRR state with %u variable entries\n",
 468				num_var);
 469			num_var = MTRR_MAX_VAR_RANGES;
 470		}
 471		for (i = 0; i < num_var; i++)
 472			mtrr_state.var_ranges[i] = var[i];
 473		num_var_ranges = num_var;
 474	}
 475
 476	mtrr_state.def_type = def_type;
 477	mtrr_state.enabled |= MTRR_STATE_MTRR_ENABLED;
 478
 479	mtrr_state_set = 1;
 480}
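/*
 * Illustrative use (editor's sketch): a guest's x86_init.hyper.init_platform()
 * hook can force write-back for the whole address space with
 *
 *	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
 */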
 481
 482static u8 type_merge(u8 type, u8 new_type, u8 *uniform)
 483{
 484	u8 effective_type;
 485
 486	if (type == MTRR_TYPE_INVALID)
 487		return new_type;
 488
 489	effective_type = get_effective_type(type, new_type);
 490	if (type != effective_type)
 491		*uniform = 0;
 492
 493	return effective_type;
 494}
 495
 496/**
 497 * mtrr_type_lookup - look up memory type in MTRR
 498 *
 499 * @start: Begin of the physical address range
 500 * @end: End of the physical address range
 501 * @uniform: output argument:
 502 *  - 1: the returned MTRR type is valid for the whole region
 503 *  - 0: otherwise
 504 *
 505 * Return Values:
 506 * MTRR_TYPE_(type)  - The effective MTRR type for the region
 507 * MTRR_TYPE_INVALID - MTRR is disabled
 508 */
 509u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
 510{
 511	u8 type = MTRR_TYPE_INVALID;
 512	unsigned int i;
 513
 514	if (!mtrr_state_set) {
 515		/* Uniformity is unknown. */
 516		*uniform = 0;
 517		return MTRR_TYPE_UNCACHABLE;
 518	}
 519
 520	*uniform = 1;
 521
 522	if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
 523		return MTRR_TYPE_UNCACHABLE;
 524
 525	for (i = 0; i < cache_map_n && start < end; i++) {
 526		/* Region after current map entry? -> continue with next one. */
 527		if (start >= cache_map[i].end)
 528			continue;
 529
 530		/* Start of region not covered by current map entry? */
 531		if (start < cache_map[i].start) {
 532			/* At least some part of region has default type. */
 533			type = type_merge(type, mtrr_state.def_type, uniform);
 534			/* End of region not covered, too? -> lookup done. */
 535			if (end <= cache_map[i].start)
 536				return type;
 537		}
 538
 539		/* At least part of region covered by map entry. */
 540		type = type_merge(type, cache_map[i].type, uniform);
 541
 542		start = cache_map[i].end;
 543	}
 544
 545	/* End of region past last entry in map? -> use default type. */
 546	if (start < end)
 547		type = type_merge(type, mtrr_state.def_type, uniform);
 548
 549	return type;
 550}
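/*
 * Illustrative lookup (editor's sketch): with WB [0x0, 0x40000) as the only
 * map entry and a default type of UC, a lookup of [0x20000, 0x60000) merges
 * WB (from the entry) with UC (for the uncovered tail), clears *uniform and
 * returns MTRR_TYPE_UNCACHABLE.
 */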
 551
 552/* Get the MSR pair relating to a var range */
 553static void
 554get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
 555{
 556	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
 557	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
 558}
 559
 560/* Fill the MSR pair relating to a var range */
 561void fill_mtrr_var_range(unsigned int index,
 562		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
 563{
 564	struct mtrr_var_range *vr;
 565
 566	vr = mtrr_state.var_ranges;
 567
 568	vr[index].base_lo = base_lo;
 569	vr[index].base_hi = base_hi;
 570	vr[index].mask_lo = mask_lo;
 571	vr[index].mask_hi = mask_hi;
 572}
 573
 574static void get_fixed_ranges(mtrr_type *frs)
 575{
 576	unsigned int *p = (unsigned int *)frs;
 577	int i;
 578
 579	k8_check_syscfg_dram_mod_en();
 580
 581	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);
 582
 583	for (i = 0; i < 2; i++)
 584		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
 585	for (i = 0; i < 8; i++)
 586		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
 587}
 588
 589void mtrr_save_fixed_ranges(void *info)
 590{
 591	if (boot_cpu_has(X86_FEATURE_MTRR))
 592		get_fixed_ranges(mtrr_state.fixed_ranges);
 593}
 594
 595static unsigned __initdata last_fixed_start;
 596static unsigned __initdata last_fixed_end;
 597static mtrr_type __initdata last_fixed_type;
 598
 599static void __init print_fixed_last(void)
 600{
 601	if (!last_fixed_end)
 602		return;
 603
 604	pr_info("  %05X-%05X %s\n", last_fixed_start,
 605		last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
 606
 607	last_fixed_end = 0;
 608}
 609
 610static void __init update_fixed_last(unsigned base, unsigned end,
 611				     mtrr_type type)
 612{
 613	last_fixed_start = base;
 614	last_fixed_end = end;
 615	last_fixed_type = type;
 616}
 617
 618static void __init
 619print_fixed(unsigned base, unsigned step, const mtrr_type *types)
 620{
 621	unsigned i;
 622
 623	for (i = 0; i < 8; ++i, ++types, base += step) {
 624		if (last_fixed_end == 0) {
 625			update_fixed_last(base, base + step, *types);
 626			continue;
 627		}
 628		if (last_fixed_end == base && last_fixed_type == *types) {
 629			last_fixed_end = base + step;
 630			continue;
 631		}
 632		/* new segments: gap or different type */
 633		print_fixed_last();
 634		update_fixed_last(base, base + step, *types);
 635	}
 636}
 637
 638static void __init print_mtrr_state(void)
 639{
 640	unsigned int i;
 641	int high_width;
 642
 643	pr_info("MTRR default type: %s\n",
 644		mtrr_attrib_to_str(mtrr_state.def_type));
 645	if (mtrr_state.have_fixed) {
 646		pr_info("MTRR fixed ranges %sabled:\n",
 647			((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
 648			 (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
 649			 "en" : "dis");
 650		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
 651		for (i = 0; i < 2; ++i)
 652			print_fixed(0x80000 + i * 0x20000, 0x04000,
 653				    mtrr_state.fixed_ranges + (i + 1) * 8);
 654		for (i = 0; i < 8; ++i)
 655			print_fixed(0xC0000 + i * 0x08000, 0x01000,
 656				    mtrr_state.fixed_ranges + (i + 3) * 8);
 657
 658		/* tail */
 659		print_fixed_last();
 660	}
 661	pr_info("MTRR variable ranges %sabled:\n",
 662		mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
 663	high_width = (boot_cpu_data.x86_phys_bits - (32 - PAGE_SHIFT) + 3) / 4;
 664
 665	for (i = 0; i < num_var_ranges; ++i) {
 666		if (mtrr_state.var_ranges[i].mask_lo & MTRR_PHYSMASK_V)
 667			pr_info("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
 668				i,
 669				high_width,
 670				mtrr_state.var_ranges[i].base_hi,
 671				mtrr_state.var_ranges[i].base_lo >> 12,
 672				high_width,
 673				mtrr_state.var_ranges[i].mask_hi,
 674				mtrr_state.var_ranges[i].mask_lo >> 12,
 675				mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo &
 676						    MTRR_PHYSBASE_TYPE));
 677		else
 678			pr_info("  %u disabled\n", i);
 679	}
 680	if (mtrr_tom2)
 681		pr_info("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
 682}
 683
 684/* Grab all of the MTRR state for this CPU into *state */
 685bool __init get_mtrr_state(void)
 686{
 687	struct mtrr_var_range *vrs;
 688	unsigned lo, dummy;
 689	unsigned int i;
 690
 691	vrs = mtrr_state.var_ranges;
 692
 693	rdmsr(MSR_MTRRcap, lo, dummy);
 694	mtrr_state.have_fixed = lo & MTRR_CAP_FIX;
 695
 696	for (i = 0; i < num_var_ranges; i++)
 697		get_mtrr_var_range(i, &vrs[i]);
 698	if (mtrr_state.have_fixed)
 699		get_fixed_ranges(mtrr_state.fixed_ranges);
 700
 701	rdmsr(MSR_MTRRdefType, lo, dummy);
 702	mtrr_state.def_type = lo & MTRR_DEF_TYPE_TYPE;
 703	mtrr_state.enabled = (lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT;
 704
 705	if (amd_special_default_mtrr()) {
 706		unsigned low, high;
 707
 708		/* TOP_MEM2 */
 709		rdmsr(MSR_K8_TOP_MEM2, low, high);
 710		mtrr_tom2 = high;
 711		mtrr_tom2 <<= 32;
 712		mtrr_tom2 |= low;
 713		mtrr_tom2 &= 0xffffff800000ULL;
 714	}
 715
 716	if (mtrr_debug)
 717		print_mtrr_state();
 718
 719	mtrr_state_set = 1;
 720
 721	return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
 722}
 723
 724/* Some BIOS's are messed up and don't set all MTRRs the same! */
 725void __init mtrr_state_warn(void)
 726{
 727	unsigned long mask = smp_changes_mask;
 728
 729	if (!mask)
 730		return;
 731	if (mask & MTRR_CHANGE_MASK_FIXED)
 732		pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
 733	if (mask & MTRR_CHANGE_MASK_VARIABLE)
 734		pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
 735	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
 736		pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
 737
 738	pr_info("mtrr: probably your BIOS does not setup all CPUs.\n");
 739	pr_info("mtrr: corrected configuration.\n");
 740}
 741
 742/*
 743 * Doesn't attempt to pass an error out to MTRR users
 744 * because it's quite complicated in some cases and probably not
 745 * worth it because the best error handling is to ignore it.
 746 */
 747void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
 748{
 749	if (wrmsr_safe(msr, a, b) < 0) {
 750		pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
 751			smp_processor_id(), msr, a, b);
 752	}
 753}
 754
 755/**
 756 * set_fixed_range - checks & updates a fixed-range MTRR if it
 757 *		     differs from the value it should have
 758 * @msr: MSR address of the MTRR which should be checked and updated
 759 * @changed: pointer which indicates whether the MTRR needed to be changed
 760 * @msrwords: pointer to the MSR values which the MSR should have
 761 */
 762static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
 763{
 764	unsigned lo, hi;
 765
 766	rdmsr(msr, lo, hi);
 767
 768	if (lo != msrwords[0] || hi != msrwords[1]) {
 769		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
 770		*changed = true;
 771	}
 772}
 773
 774/**
 775 * generic_get_free_region - Get a free MTRR.
 776 * @base: The starting (base) address of the region.
 777 * @size: The size (in bytes) of the region.
 778 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 779 *
 780 * Returns: The index of the region on success, else negative on error.
 781 */
 782int
 783generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 784{
 785	unsigned long lbase, lsize;
 786	mtrr_type ltype;
 787	int i, max;
 788
 789	max = num_var_ranges;
 790	if (replace_reg >= 0 && replace_reg < max)
 791		return replace_reg;
 792
 793	for (i = 0; i < max; ++i) {
 794		mtrr_if->get(i, &lbase, &lsize, &ltype);
 795		if (lsize == 0)
 796			return i;
 797	}
 798
 799	return -ENOSPC;
 800}
 801
 802static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 803			     unsigned long *size, mtrr_type *type)
 804{
 805	u32 mask_lo, mask_hi, base_lo, base_hi;
 806	unsigned int hi;
 807	u64 tmp, mask;
 808
 809	/*
 810	 * get_mtrr doesn't need to update mtrr_state, also it could be called
 811	 * from any cpu, so try to print it out directly.
 812	 */
 813	get_cpu();
 814
 815	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
 816
 817	if (!(mask_lo & MTRR_PHYSMASK_V)) {
 818		/*  Invalid (i.e. free) range */
 819		*base = 0;
 820		*size = 0;
 821		*type = 0;
 822		goto out_put_cpu;
 823	}
 824
 825	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
 826
 827	/* Work out the shifted address mask: */
 828	tmp = (u64)mask_hi << 32 | (mask_lo & PAGE_MASK);
 829	mask = (u64)phys_hi_rsvd << 32 | tmp;
 830
 831	/* Expand tmp with high bits to all 1s: */
 832	hi = fls64(tmp);
 833	if (hi > 0) {
 834		tmp |= ~((1ULL<<(hi - 1)) - 1);
 835
 836		if (tmp != mask) {
 837			pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
 838			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 839			mask = tmp;
 840		}
 841	}
 842
 843	/*
 844	 * This works correctly if size is a power of two, i.e. a
 845	 * contiguous range:
 846	 */
 847	*size = -mask >> PAGE_SHIFT;
 848	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
 849	*type = base_lo & MTRR_PHYSBASE_TYPE;
 850
 851out_put_cpu:
 852	put_cpu();
 853}
 854
 855/**
 856 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
 857 *		      differ from the saved set
 858 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 859 */
 860static int set_fixed_ranges(mtrr_type *frs)
 861{
 862	unsigned long long *saved = (unsigned long long *)frs;
 863	bool changed = false;
 864	int block = -1, range;
 865
 866	k8_check_syscfg_dram_mod_en();
 867
 868	while (fixed_range_blocks[++block].ranges) {
 869		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
 870			set_fixed_range(fixed_range_blocks[block].base_msr + range,
 871					&changed, (unsigned int *)saved++);
 872	}
 873
 874	return changed;
 875}
 876
 877/*
 878 * Set the MSR pair relating to a var range.
 879 * Returns true if changes are made.
 880 */
 881static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
 882{
 883	unsigned int lo, hi;
 884	bool changed = false;
 885
 886	rdmsr(MTRRphysBase_MSR(index), lo, hi);
 887	if ((vr->base_lo & ~MTRR_PHYSBASE_RSVD) != (lo & ~MTRR_PHYSBASE_RSVD)
 888	    || (vr->base_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) {
 889
 890		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
 891		changed = true;
 892	}
 893
 894	rdmsr(MTRRphysMask_MSR(index), lo, hi);
 895
 896	if ((vr->mask_lo & ~MTRR_PHYSMASK_RSVD) != (lo & ~MTRR_PHYSMASK_RSVD)
 897	    || (vr->mask_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) {
 898		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
 899		changed = true;
 900	}
 901	return changed;
 902}
 903
 904static u32 deftype_lo, deftype_hi;
 905
 906/**
 907 * set_mtrr_state - Set the MTRR state for this CPU.
 908 *
 909 * NOTE: The CPU must already be in a safe state for MTRR changes, including
 910 *       measures that only a single CPU can be active in set_mtrr_state() in
 911 *       order to not be subject to races for usage of deftype_lo. This is
 912 *       accomplished by taking cache_disable_lock.
 913 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 914 */
 915static unsigned long set_mtrr_state(void)
 916{
 917	unsigned long change_mask = 0;
 918	unsigned int i;
 919
 920	for (i = 0; i < num_var_ranges; i++) {
 921		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
 922			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
 923	}
 924
 925	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
 926		change_mask |= MTRR_CHANGE_MASK_FIXED;
 927
 928	/*
 929	 * Set_mtrr_restore restores the old value of MTRRdefType,
 930	 * so to set it we fiddle with the saved value:
 931	 */
 932	if ((deftype_lo & MTRR_DEF_TYPE_TYPE) != mtrr_state.def_type ||
 933	    ((deftype_lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT) != mtrr_state.enabled) {
 934
 935		deftype_lo = (deftype_lo & MTRR_DEF_TYPE_DISABLE) |
 936			     mtrr_state.def_type |
 937			     (mtrr_state.enabled << MTRR_STATE_SHIFT);
 938		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
 939	}
 940
 941	return change_mask;
 942}
 943
 944void mtrr_disable(void)
 945{
 946	/* Save MTRR state */
 947	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 948
 949	/* Disable MTRRs, and set the default type to uncached */
 950	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & MTRR_DEF_TYPE_DISABLE, deftype_hi);
 951}
 952
 953void mtrr_enable(void)
 954{
 955	/* Intel (P6) standard MTRRs */
 956	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 957}
 958
 959void mtrr_generic_set_state(void)
 960{
 961	unsigned long mask, count;
 962
 963	/* Actually set the state */
 964	mask = set_mtrr_state();
 965
 966	/* Use the atomic bitops to update the global mask */
 967	for (count = 0; count < sizeof(mask) * 8; ++count) {
 968		if (mask & 0x01)
 969			set_bit(count, &smp_changes_mask);
 970		mask >>= 1;
 971	}
 972}
 973
 974/**
 975 * generic_set_mtrr - set variable MTRR register on the local CPU.
 976 *
 977 * @reg: The register to set.
 978 * @base: The base address of the region.
 979 * @size: The size of the region. If this is 0 the region is disabled.
 980 * @type: The type of the region.
 981 *
 982 * Returns nothing.
 983 */
 984static void generic_set_mtrr(unsigned int reg, unsigned long base,
 985			     unsigned long size, mtrr_type type)
 986{
 987	unsigned long flags;
 988	struct mtrr_var_range *vr;
 989
 990	vr = &mtrr_state.var_ranges[reg];
 991
 992	local_irq_save(flags);
 993	cache_disable();
 994
 995	if (size == 0) {
 996		/*
 997		 * The invalid bit is kept in the mask, so we simply
 998		 * clear the relevant mask register to disable a range.
 999		 */
1000		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
1001		memset(vr, 0, sizeof(struct mtrr_var_range));
1002	} else {
1003		vr->base_lo = base << PAGE_SHIFT | type;
1004		vr->base_hi = (base >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;
1005		vr->mask_lo = -size << PAGE_SHIFT | MTRR_PHYSMASK_V;
1006		vr->mask_hi = (-size >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;
1007
1008		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
1009		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
1010	}
1011
1012	cache_enable();
1013	local_irq_restore(flags);
1014}
1015
1016int generic_validate_add_page(unsigned long base, unsigned long size,
1017			      unsigned int type)
1018{
1019	unsigned long lbase, last;
1020
1021	/*
1022	 * For Intel PPro stepping <= 7
1023	 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
1024	 */
1025	if (mtrr_if == &generic_mtrr_ops && boot_cpu_data.x86 == 6 &&
1026	    boot_cpu_data.x86_model == 1 &&
1027	    boot_cpu_data.x86_stepping <= 7) {
1028		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
1029			pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
1030			return -EINVAL;
1031		}
1032		if (!(base + size < 0x70000 || base > 0x7003F) &&
1033		    (type == MTRR_TYPE_WRCOMB
1034		     || type == MTRR_TYPE_WRBACK)) {
1035			pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
1036			return -EINVAL;
1037		}
1038	}
1039
1040	/*
1041	 * Check upper bits of base and last are equal and lower bits are 0
1042	 * for base and 1 for last
1043	 */
1044	last = base + size - 1;
1045	for (lbase = base; !(lbase & 1) && (last & 1);
1046	     lbase = lbase >> 1, last = last >> 1)
1047		;
1048	if (lbase != last) {
1049		pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
1050		return -EINVAL;
1051	}
1052	return 0;
1053}
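/*
 * Illustrative alignment check (editor's sketch): base 0x400 pages with size
 * 0x400 pages gives last = 0x7ff; lbase and last both reduce to 1 in the
 * loop, so the request passes.  base 0x600 with the same size gives
 * last = 0x9ff; the loop stops with lbase = 3 and last = 4, so -EINVAL is
 * returned.
 */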
1054
1055static int generic_have_wrcomb(void)
1056{
1057	unsigned long config, dummy;
1058	rdmsr(MSR_MTRRcap, config, dummy);
1059	return config & MTRR_CAP_WC;
1060}
1061
1062int positive_have_wrcomb(void)
1063{
1064	return 1;
1065}
1066
1067/*
1068 * Generic structure...
1069 */
1070const struct mtrr_ops generic_mtrr_ops = {
1071	.get			= generic_get_mtrr,
1072	.get_free_region	= generic_get_free_region,
1073	.set			= generic_set_mtrr,
1074	.validate_add_page	= generic_validate_add_page,
1075	.have_wrcomb		= generic_have_wrcomb,
1076};
v3.5.6
 
  1/*
  2 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   3 * because MTRRs can span up to 40 bits (36 bits on most modern x86)
  4 */
  5#define DEBUG
  6
  7#include <linux/module.h>
  8#include <linux/init.h>
  9#include <linux/io.h>
 10#include <linux/mm.h>
 11
 12#include <asm/processor-flags.h>
 13#include <asm/cpufeature.h>
 14#include <asm/tlbflush.h>
 15#include <asm/mtrr.h>
 16#include <asm/msr.h>
 17#include <asm/pat.h>
 18
 19#include "mtrr.h"
 20
 21struct fixed_range_block {
 22	int base_msr;		/* start address of an MTRR block */
 23	int ranges;		/* number of MTRRs in this block  */
 24};
 25
 26static struct fixed_range_block fixed_range_blocks[] = {
 27	{ MSR_MTRRfix64K_00000, 1 }, /* one   64k MTRR  */
 28	{ MSR_MTRRfix16K_80000, 2 }, /* two   16k MTRRs */
 29	{ MSR_MTRRfix4K_C0000,  8 }, /* eight  4k MTRRs */
 30	{}
 31};
 32
 33static unsigned long smp_changes_mask;
 34static int mtrr_state_set;
 35u64 mtrr_tom2;
 36
 37struct mtrr_state_type mtrr_state;
 38EXPORT_SYMBOL_GPL(mtrr_state);
 39
 40/*
 41 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 42 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 43 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 44 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
  45 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 46 * 0 for operation."
 47 */
 48static inline void k8_check_syscfg_dram_mod_en(void)
 49{
 50	u32 lo, hi;
 51
 52	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
 53	      (boot_cpu_data.x86 >= 0x0f)))
 54		return;
 55
 56	rdmsr(MSR_K8_SYSCFG, lo, hi);
 57	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
 58		printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
 59		       " not cleared by BIOS, clearing this bit\n",
 60		       smp_processor_id());
 61		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
 62		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
 63	}
 64}
 65
 66/* Get the size of contiguous MTRR range */
 67static u64 get_mtrr_size(u64 mask)
 68{
 69	u64 size;
 70
 71	mask >>= PAGE_SHIFT;
 72	mask |= size_or_mask;
 73	size = -mask;
 74	size <<= PAGE_SHIFT;
 75	return size;
 76}
 77
 78/*
 79 * Check and return the effective type for MTRR-MTRR type overlap.
 80 * Returns 1 if the effective type is UNCACHEABLE, else returns 0
 81 */
 82static int check_type_overlap(u8 *prev, u8 *curr)
 83{
 84	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
 85		*prev = MTRR_TYPE_UNCACHABLE;
 86		*curr = MTRR_TYPE_UNCACHABLE;
 87		return 1;
 88	}
 89
 90	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
 91	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
 92		*prev = MTRR_TYPE_WRTHROUGH;
 93		*curr = MTRR_TYPE_WRTHROUGH;
 94	}
 95
 96	if (*prev != *curr) {
 97		*prev = MTRR_TYPE_UNCACHABLE;
 98		*curr = MTRR_TYPE_UNCACHABLE;
 99		return 1;
100	}
101
102	return 0;
103}
104
105/*
106 * Error/Semi-error returns:
107 * 0xFF - when MTRR is not enabled
108 * *repeat == 1 implies [start:end] spanned across MTRR range and type returned
109 *		corresponds only to [start:*partial_end].
110 *		Caller has to lookup again for [*partial_end:end].
111 */
112static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
113{
114	int i;
115	u64 base, mask;
116	u8 prev_match, curr_match;
117
118	*repeat = 0;
119	if (!mtrr_state_set)
120		return 0xFF;
121
122	if (!mtrr_state.enabled)
123		return 0xFF;
124
 125	/* Make end inclusive, instead of exclusive */
126	end--;
127
128	/* Look in fixed ranges. Just return the type as per start */
129	if (mtrr_state.have_fixed && (start < 0x100000)) {
130		int idx;
131
132		if (start < 0x80000) {
133			idx = 0;
134			idx += (start >> 16);
135			return mtrr_state.fixed_ranges[idx];
136		} else if (start < 0xC0000) {
137			idx = 1 * 8;
138			idx += ((start - 0x80000) >> 14);
139			return mtrr_state.fixed_ranges[idx];
140		} else if (start < 0x1000000) {
141			idx = 3 * 8;
142			idx += ((start - 0xC0000) >> 12);
143			return mtrr_state.fixed_ranges[idx];
144		}
145	}
146
147	/*
148	 * Look in variable ranges
 149	 * Look for multiple ranges matching this address and pick type
150	 * as per MTRR precedence
151	 */
152	if (!(mtrr_state.enabled & 2))
153		return mtrr_state.def_type;
154
155	prev_match = 0xFF;
156	for (i = 0; i < num_var_ranges; ++i) {
157		unsigned short start_state, end_state;
158
159		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
160			continue;
161
162		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
163		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
164		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
165		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
166
167		start_state = ((start & mask) == (base & mask));
168		end_state = ((end & mask) == (base & mask));
169
170		if (start_state != end_state) {
171			/*
172			 * We have start:end spanning across an MTRR.
173			 * We split the region into
174			 * either
175			 * (start:mtrr_end) (mtrr_end:end)
176			 * or
177			 * (start:mtrr_start) (mtrr_start:end)
178			 * depending on kind of overlap.
179			 * Return the type for first region and a pointer to
180			 * the start of second region so that caller will
181			 * lookup again on the second region.
182			 * Note: This way we handle multiple overlaps as well.
183			 */
184			if (start_state)
185				*partial_end = base + get_mtrr_size(mask);
186			else
187				*partial_end = base;
188
189			if (unlikely(*partial_end <= start)) {
190				WARN_ON(1);
191				*partial_end = start + PAGE_SIZE;
192			}
193
194			end = *partial_end - 1; /* end is inclusive */
195			*repeat = 1;
196		}
197
198		if ((start & mask) != (base & mask))
199			continue;
200
201		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
202		if (prev_match == 0xFF) {
203			prev_match = curr_match;
204			continue;
205		}
206
207		if (check_type_overlap(&prev_match, &curr_match))
208			return curr_match;
209	}
210
211	if (mtrr_tom2) {
212		if (start >= (1ULL<<32) && (end < mtrr_tom2))
213			return MTRR_TYPE_WRBACK;
214	}
215
216	if (prev_match != 0xFF)
217		return prev_match;
218
219	return mtrr_state.def_type;
220}
221
222/*
223 * Returns the effective MTRR type for the region
224 * Error return:
225 * 0xFF - when MTRR is not enabled
226 */
227u8 mtrr_type_lookup(u64 start, u64 end)
228{
229	u8 type, prev_type;
230	int repeat;
231	u64 partial_end;
232
233	type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
234
235	/*
236	 * Common path is with repeat = 0.
237	 * However, we can have cases where [start:end] spans across some
238	 * MTRR range. Do repeated lookups for that case here.
239	 */
240	while (repeat) {
241		prev_type = type;
242		start = partial_end;
243		type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
244
245		if (check_type_overlap(&prev_type, &type))
246			return type;
247	}
248
249	return type;
250}
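/*
 * Illustrative repeat handling (editor's sketch): if [start:end] straddles
 * the end of a WB MTRR into default-type space, the first
 * __mtrr_type_lookup() returns WB for [start:partial_end] and sets *repeat;
 * the loop above then looks up [partial_end:end], obtains the default type,
 * and check_type_overlap() folds both into the effective type for the whole
 * region.
 */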
251
252/* Get the MSR pair relating to a var range */
253static void
254get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
255{
256	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
257	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
258}
259
260/* Fill the MSR pair relating to a var range */
261void fill_mtrr_var_range(unsigned int index,
262		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
263{
264	struct mtrr_var_range *vr;
265
266	vr = mtrr_state.var_ranges;
267
268	vr[index].base_lo = base_lo;
269	vr[index].base_hi = base_hi;
270	vr[index].mask_lo = mask_lo;
271	vr[index].mask_hi = mask_hi;
272}
273
274static void get_fixed_ranges(mtrr_type *frs)
275{
276	unsigned int *p = (unsigned int *)frs;
277	int i;
278
279	k8_check_syscfg_dram_mod_en();
280
281	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);
282
283	for (i = 0; i < 2; i++)
284		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
285	for (i = 0; i < 8; i++)
286		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
287}
288
289void mtrr_save_fixed_ranges(void *info)
290{
291	if (cpu_has_mtrr)
292		get_fixed_ranges(mtrr_state.fixed_ranges);
293}
294
295static unsigned __initdata last_fixed_start;
296static unsigned __initdata last_fixed_end;
297static mtrr_type __initdata last_fixed_type;
298
299static void __init print_fixed_last(void)
300{
301	if (!last_fixed_end)
302		return;
303
304	pr_debug("  %05X-%05X %s\n", last_fixed_start,
305		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
306
307	last_fixed_end = 0;
308}
309
310static void __init update_fixed_last(unsigned base, unsigned end,
311				     mtrr_type type)
312{
313	last_fixed_start = base;
314	last_fixed_end = end;
315	last_fixed_type = type;
316}
317
318static void __init
319print_fixed(unsigned base, unsigned step, const mtrr_type *types)
320{
321	unsigned i;
322
323	for (i = 0; i < 8; ++i, ++types, base += step) {
324		if (last_fixed_end == 0) {
325			update_fixed_last(base, base + step, *types);
326			continue;
327		}
328		if (last_fixed_end == base && last_fixed_type == *types) {
329			last_fixed_end = base + step;
330			continue;
331		}
332		/* new segments: gap or different type */
333		print_fixed_last();
334		update_fixed_last(base, base + step, *types);
335	}
336}
337
338static void prepare_set(void);
339static void post_set(void);
340
341static void __init print_mtrr_state(void)
342{
343	unsigned int i;
344	int high_width;
345
346	pr_debug("MTRR default type: %s\n",
347		 mtrr_attrib_to_str(mtrr_state.def_type));
348	if (mtrr_state.have_fixed) {
349		pr_debug("MTRR fixed ranges %sabled:\n",
350			 mtrr_state.enabled & 1 ? "en" : "dis");
351		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
352		for (i = 0; i < 2; ++i)
353			print_fixed(0x80000 + i * 0x20000, 0x04000,
354				    mtrr_state.fixed_ranges + (i + 1) * 8);
355		for (i = 0; i < 8; ++i)
356			print_fixed(0xC0000 + i * 0x08000, 0x01000,
357				    mtrr_state.fixed_ranges + (i + 3) * 8);
358
359		/* tail */
360		print_fixed_last();
361	}
362	pr_debug("MTRR variable ranges %sabled:\n",
363		 mtrr_state.enabled & 2 ? "en" : "dis");
364	if (size_or_mask & 0xffffffffUL)
365		high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
366	else
367		high_width = ffs(size_or_mask>>32) + 32 - 1;
368	high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
369
370	for (i = 0; i < num_var_ranges; ++i) {
371		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
372			pr_debug("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
373				 i,
374				 high_width,
375				 mtrr_state.var_ranges[i].base_hi,
376				 mtrr_state.var_ranges[i].base_lo >> 12,
377				 high_width,
378				 mtrr_state.var_ranges[i].mask_hi,
379				 mtrr_state.var_ranges[i].mask_lo >> 12,
380				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
381		else
382			pr_debug("  %u disabled\n", i);
383	}
384	if (mtrr_tom2)
385		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
386}
387
388/* Grab all of the MTRR state for this CPU into *state */
389void __init get_mtrr_state(void)
390{
391	struct mtrr_var_range *vrs;
392	unsigned long flags;
393	unsigned lo, dummy;
394	unsigned int i;
395
396	vrs = mtrr_state.var_ranges;
397
398	rdmsr(MSR_MTRRcap, lo, dummy);
399	mtrr_state.have_fixed = (lo >> 8) & 1;
400
401	for (i = 0; i < num_var_ranges; i++)
402		get_mtrr_var_range(i, &vrs[i]);
403	if (mtrr_state.have_fixed)
404		get_fixed_ranges(mtrr_state.fixed_ranges);
405
406	rdmsr(MSR_MTRRdefType, lo, dummy);
407	mtrr_state.def_type = (lo & 0xff);
408	mtrr_state.enabled = (lo & 0xc00) >> 10;
409
410	if (amd_special_default_mtrr()) {
411		unsigned low, high;
412
413		/* TOP_MEM2 */
414		rdmsr(MSR_K8_TOP_MEM2, low, high);
415		mtrr_tom2 = high;
416		mtrr_tom2 <<= 32;
417		mtrr_tom2 |= low;
418		mtrr_tom2 &= 0xffffff800000ULL;
419	}
420
421	print_mtrr_state();
422
423	mtrr_state_set = 1;
424
425	/* PAT setup for BP. We need to go through sync steps here */
426	local_irq_save(flags);
427	prepare_set();
428
429	pat_init();
430
431	post_set();
432	local_irq_restore(flags);
433}
434
435/* Some BIOS's are messed up and don't set all MTRRs the same! */
436void __init mtrr_state_warn(void)
437{
438	unsigned long mask = smp_changes_mask;
439
440	if (!mask)
441		return;
442	if (mask & MTRR_CHANGE_MASK_FIXED)
443		pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
444	if (mask & MTRR_CHANGE_MASK_VARIABLE)
445		pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
446	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
447		pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
448
449	printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
450	printk(KERN_INFO "mtrr: corrected configuration.\n");
451}
452
453/*
454 * Doesn't attempt to pass an error out to MTRR users
455 * because it's quite complicated in some cases and probably not
456 * worth it because the best error handling is to ignore it.
457 */
458void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
459{
460	if (wrmsr_safe(msr, a, b) < 0) {
461		printk(KERN_ERR
462			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
463			smp_processor_id(), msr, a, b);
464	}
465}
466
467/**
468 * set_fixed_range - checks & updates a fixed-range MTRR if it
469 *		     differs from the value it should have
 470 * @msr: MSR address of the MTRR which should be checked and updated
471 * @changed: pointer which indicates whether the MTRR needed to be changed
472 * @msrwords: pointer to the MSR values which the MSR should have
473 */
474static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
475{
476	unsigned lo, hi;
477
478	rdmsr(msr, lo, hi);
479
480	if (lo != msrwords[0] || hi != msrwords[1]) {
481		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
482		*changed = true;
483	}
484}
485
486/**
487 * generic_get_free_region - Get a free MTRR.
488 * @base: The starting (base) address of the region.
489 * @size: The size (in bytes) of the region.
490 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
491 *
492 * Returns: The index of the region on success, else negative on error.
493 */
494int
495generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
496{
497	unsigned long lbase, lsize;
498	mtrr_type ltype;
499	int i, max;
500
501	max = num_var_ranges;
502	if (replace_reg >= 0 && replace_reg < max)
503		return replace_reg;
504
505	for (i = 0; i < max; ++i) {
506		mtrr_if->get(i, &lbase, &lsize, &ltype);
507		if (lsize == 0)
508			return i;
509	}
510
511	return -ENOSPC;
512}
513
514static void generic_get_mtrr(unsigned int reg, unsigned long *base,
515			     unsigned long *size, mtrr_type *type)
516{
517	unsigned int mask_lo, mask_hi, base_lo, base_hi;
518	unsigned int tmp, hi;
519
520	/*
521	 * get_mtrr doesn't need to update mtrr_state, also it could be called
522	 * from any cpu, so try to print it out directly.
523	 */
524	get_cpu();
525
526	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
527
528	if ((mask_lo & 0x800) == 0) {
529		/*  Invalid (i.e. free) range */
530		*base = 0;
531		*size = 0;
532		*type = 0;
533		goto out_put_cpu;
534	}
535
536	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
537
538	/* Work out the shifted address mask: */
539	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
540	mask_lo = size_or_mask | tmp;
541
542	/* Expand tmp with high bits to all 1s: */
543	hi = fls(tmp);
544	if (hi > 0) {
545		tmp |= ~((1<<(hi - 1)) - 1);
546
547		if (tmp != mask_lo) {
548			printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
549			add_taint(TAINT_FIRMWARE_WORKAROUND);
550			mask_lo = tmp;
551		}
552	}
553
554	/*
555	 * This works correctly if size is a power of two, i.e. a
556	 * contiguous range:
557	 */
558	*size = -mask_lo;
559	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
560	*type = base_lo & 0xff;
561
562out_put_cpu:
563	put_cpu();
564}
565
566/**
567 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
568 *		      differ from the saved set
569 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
570 */
571static int set_fixed_ranges(mtrr_type *frs)
572{
573	unsigned long long *saved = (unsigned long long *)frs;
574	bool changed = false;
575	int block = -1, range;
576
577	k8_check_syscfg_dram_mod_en();
578
579	while (fixed_range_blocks[++block].ranges) {
580		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
581			set_fixed_range(fixed_range_blocks[block].base_msr + range,
582					&changed, (unsigned int *)saved++);
583	}
584
585	return changed;
586}
587
588/*
589 * Set the MSR pair relating to a var range.
590 * Returns true if changes are made.
591 */
592static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
593{
594	unsigned int lo, hi;
595	bool changed = false;
596
597	rdmsr(MTRRphysBase_MSR(index), lo, hi);
598	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
599	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
600		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
601
602		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
603		changed = true;
604	}
605
606	rdmsr(MTRRphysMask_MSR(index), lo, hi);
607
608	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
609	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
610		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
611		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
612		changed = true;
613	}
614	return changed;
615}
616
617static u32 deftype_lo, deftype_hi;
618
619/**
620 * set_mtrr_state - Set the MTRR state for this CPU.
621 *
622 * NOTE: The CPU must already be in a safe state for MTRR changes.
623 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
624 */
625static unsigned long set_mtrr_state(void)
626{
627	unsigned long change_mask = 0;
628	unsigned int i;
629
630	for (i = 0; i < num_var_ranges; i++) {
631		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
632			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
633	}
634
635	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
636		change_mask |= MTRR_CHANGE_MASK_FIXED;
637
638	/*
639	 * Set_mtrr_restore restores the old value of MTRRdefType,
640	 * so to set it we fiddle with the saved value:
641	 */
642	if ((deftype_lo & 0xff) != mtrr_state.def_type
643	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
644
645		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
646			     (mtrr_state.enabled << 10);
647		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
648	}
649
650	return change_mask;
651}
652
653
654static unsigned long cr4;
655static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
656
657/*
 658 * Since we are disabling the cache, don't allow any interrupts;
659 * they would run extremely slow and would only increase the pain.
660 *
661 * The caller must ensure that local interrupts are disabled and
662 * are reenabled after post_set() has been called.
663 */
664static void prepare_set(void) __acquires(set_atomicity_lock)
665{
666	unsigned long cr0;
667
668	/*
669	 * Note that this is not ideal
670	 * since the cache is only flushed/disabled for this CPU while the
671	 * MTRRs are changed, but changing this requires more invasive
672	 * changes to the way the kernel boots
673	 */
674
675	raw_spin_lock(&set_atomicity_lock);
676
677	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
678	cr0 = read_cr0() | X86_CR0_CD;
679	write_cr0(cr0);
680	wbinvd();
681
682	/* Save value of CR4 and clear Page Global Enable (bit 7) */
683	if (cpu_has_pge) {
684		cr4 = read_cr4();
685		write_cr4(cr4 & ~X86_CR4_PGE);
686	}
687
688	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
689	__flush_tlb();
690
691	/* Save MTRR state */
692	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
693
694	/* Disable MTRRs, and set the default type to uncached */
695	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
696	wbinvd();
697}
698
699static void post_set(void) __releases(set_atomicity_lock)
700{
701	/* Flush TLBs (no need to flush caches - they are disabled) */
702	__flush_tlb();
703
704	/* Intel (P6) standard MTRRs */
705	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
706
707	/* Enable caches */
708	write_cr0(read_cr0() & 0xbfffffff);
709
710	/* Restore value of CR4 */
711	if (cpu_has_pge)
712		write_cr4(cr4);
713	raw_spin_unlock(&set_atomicity_lock);
714}
715
716static void generic_set_all(void)
717{
718	unsigned long mask, count;
719	unsigned long flags;
720
721	local_irq_save(flags);
722	prepare_set();
723
724	/* Actually set the state */
725	mask = set_mtrr_state();
726
727	/* also set PAT */
728	pat_init();
729
730	post_set();
731	local_irq_restore(flags);
732
733	/* Use the atomic bitops to update the global mask */
734	for (count = 0; count < sizeof mask * 8; ++count) {
735		if (mask & 0x01)
736			set_bit(count, &smp_changes_mask);
737		mask >>= 1;
738	}
739
740}
741
742/**
743 * generic_set_mtrr - set variable MTRR register on the local CPU.
744 *
745 * @reg: The register to set.
746 * @base: The base address of the region.
747 * @size: The size of the region. If this is 0 the region is disabled.
748 * @type: The type of the region.
749 *
750 * Returns nothing.
751 */
752static void generic_set_mtrr(unsigned int reg, unsigned long base,
753			     unsigned long size, mtrr_type type)
754{
755	unsigned long flags;
756	struct mtrr_var_range *vr;
757
758	vr = &mtrr_state.var_ranges[reg];
759
760	local_irq_save(flags);
761	prepare_set();
762
763	if (size == 0) {
764		/*
765		 * The invalid bit is kept in the mask, so we simply
766		 * clear the relevant mask register to disable a range.
767		 */
768		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
769		memset(vr, 0, sizeof(struct mtrr_var_range));
770	} else {
771		vr->base_lo = base << PAGE_SHIFT | type;
772		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
773		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
774		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
775
776		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
777		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
778	}
779
780	post_set();
781	local_irq_restore(flags);
782}
783
784int generic_validate_add_page(unsigned long base, unsigned long size,
785			      unsigned int type)
786{
787	unsigned long lbase, last;
788
789	/*
790	 * For Intel PPro stepping <= 7
791	 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
792	 */
793	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
794	    boot_cpu_data.x86_model == 1 &&
795	    boot_cpu_data.x86_mask <= 7) {
796		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
797			pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
798			return -EINVAL;
799		}
800		if (!(base + size < 0x70000 || base > 0x7003F) &&
801		    (type == MTRR_TYPE_WRCOMB
802		     || type == MTRR_TYPE_WRBACK)) {
803			pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
804			return -EINVAL;
805		}
806	}
807
808	/*
809	 * Check upper bits of base and last are equal and lower bits are 0
810	 * for base and 1 for last
811	 */
812	last = base + size - 1;
813	for (lbase = base; !(lbase & 1) && (last & 1);
814	     lbase = lbase >> 1, last = last >> 1)
815		;
816	if (lbase != last) {
817		pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
818		return -EINVAL;
819	}
820	return 0;
821}
822
823static int generic_have_wrcomb(void)
824{
825	unsigned long config, dummy;
826	rdmsr(MSR_MTRRcap, config, dummy);
827	return config & (1 << 10);
828}
829
830int positive_have_wrcomb(void)
831{
832	return 1;
833}
834
835/*
836 * Generic structure...
837 */
838const struct mtrr_ops generic_mtrr_ops = {
839	.use_intel_if		= 1,
840	.set_all		= generic_set_all,
841	.get			= generic_get_mtrr,
842	.get_free_region	= generic_get_free_region,
843	.set			= generic_set_mtrr,
844	.validate_add_page	= generic_validate_add_page,
845	.have_wrcomb		= generic_have_wrcomb,
846};