// SPDX-License-Identifier: GPL-2.0-only
/*
 * This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36 bits on most modern x86).
 */
#define DEBUG

#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/processor-flags.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/pat.h>

#include "mtrr.h"

struct fixed_range_block {
	int base_msr;	/* start address of an MTRR block */
	int ranges;	/* number of MTRRs in this block */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
	{ MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
	{ MSR_MTRRfix4K_C0000, 8 },  /* eight 4k MTRRs */
	{}
};
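
/*
 * For orientation: these three blocks describe all eleven fixed-range
 * MSRs, which together map the first 1MB of the address space:
 *
 *   MSR_MTRRfix64K_00000 (1 MSR)  -> 0x00000-0x7FFFF in 64KB slots
 *   MSR_MTRRfix16K_80000 (2 MSRs) -> 0x80000-0xBFFFF in 16KB slots
 *   MSR_MTRRfix4K_C0000  (8 MSRs) -> 0xC0000-0xFFFFF in 4KB slots
 *
 * Each MSR holds eight one-byte memory types, 88 fixed entries in all.
 */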

static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);

/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}

/* Get the size of contiguous MTRR range */
static u64 get_mtrr_size(u64 mask)
{
	u64 size;

	mask >>= PAGE_SHIFT;
	mask |= size_or_mask;
	size = -mask;
	size <<= PAGE_SHIFT;
	return size;
}
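
/*
 * Worked example, assuming a 36-bit physical address width (so
 * size_or_mask has bits 24 and up set in page-frame units): a PHYSMASK
 * value of 0xff8000000 describes a 128MB range.
 *
 *   mask >>= PAGE_SHIFT   -> 0x0000000000ff8000
 *   mask |= size_or_mask  -> 0xffffffffffff8000
 *   size = -mask          -> 0x0000000000008000  (32768 pages)
 *   size <<= PAGE_SHIFT   -> 0x0000000008000000  (128MB)
 */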

/*
 * Check and return the effective type for MTRR-MTRR type overlap.
 * Returns 1 if the effective type is UNCACHEABLE, else returns 0
 */
static int check_type_overlap(u8 *prev, u8 *curr)
{
	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
		*prev = MTRR_TYPE_WRTHROUGH;
		*curr = MTRR_TYPE_WRTHROUGH;
	}

	if (*prev != *curr) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	return 0;
}
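
/*
 * The resulting precedence, spelled out: UC combined with anything is
 * UC; WB combined with WT is WT; any other pair of differing types is
 * undefined by the SDM and conservatively treated as UC here.
 */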

/**
 * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries
 *
 * Return the MTRR fixed memory type of 'start'.
 *
 * MTRR fixed entries are divided as follows:
 *  0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges
 *  0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges
 *  0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges
 *
 * Return Values:
 * MTRR_TYPE_(type)  - Matched memory type
 * MTRR_TYPE_INVALID - Unmatched
 */
static u8 mtrr_type_lookup_fixed(u64 start, u64 end)
{
	int idx;

	if (start >= 0x100000)
		return MTRR_TYPE_INVALID;

	/* 0x0 - 0x7FFFF */
	if (start < 0x80000) {
		idx = 0;
		idx += (start >> 16);
		return mtrr_state.fixed_ranges[idx];
	/* 0x80000 - 0xBFFFF */
	} else if (start < 0xC0000) {
		idx = 1 * 8;
		idx += ((start - 0x80000) >> 14);
		return mtrr_state.fixed_ranges[idx];
	}

	/* 0xC0000 - 0xFFFFF */
	idx = 3 * 8;
	idx += ((start - 0xC0000) >> 12);
	return mtrr_state.fixed_ranges[idx];
}
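
/*
 * Index arithmetic, illustrated: fixed_ranges[] holds 8 + 16 + 64 = 88
 * entries. A hypothetical start of 0xA8000 falls in the 16KB block, so
 * idx = 8 + ((0xA8000 - 0x80000) >> 14) = 8 + 10 = 18.
 */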

/**
 * mtrr_type_lookup_variable - look up memory type in MTRR variable entries
 *
 * Return Value:
 * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched)
 *
 * Output Arguments:
 * repeat - Set to 1 when [start:end] spanned across MTRR range and type
 *	    returned corresponds only to [start:*partial_end]. Caller has
 *	    to lookup again for [*partial_end:end].
 *
 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
 *	     region is fully covered by a single MTRR entry or the default
 *	     type.
 */
static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
				    int *repeat, u8 *uniform)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	*repeat = 0;
	*uniform = 1;

	/* Make end inclusive instead of exclusive */
	end--;

	prev_match = MTRR_TYPE_INVALID;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state, inclusive;

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		inclusive = ((start < base) && (end > base));

		if ((start_state != end_state) || inclusive) {
			/*
			 * We have start:end spanning across an MTRR.
			 * We split the region into either
			 *
			 * - start_state:1
			 *     (start:mtrr_end)(mtrr_end:end)
			 * - end_state:1
			 *     (start:mtrr_start)(mtrr_start:end)
			 * - inclusive:1
			 *     (start:mtrr_start)(mtrr_start:mtrr_end)(mtrr_end:end)
			 *
			 * depending on kind of overlap.
			 *
			 * Return the type of the first region and a pointer
			 * to the start of next region so that caller will be
			 * advised to lookup again after having adjusted start
			 * and end.
			 *
			 * Note: This way we handle overlaps with multiple
			 * entries and the default type properly.
			 */
			if (start_state)
				*partial_end = base + get_mtrr_size(mask);
			else
				*partial_end = base;

			if (unlikely(*partial_end <= start)) {
				WARN_ON(1);
				*partial_end = start + PAGE_SIZE;
			}

			end = *partial_end - 1; /* end is inclusive */
			*repeat = 1;
			*uniform = 0;
		}

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == MTRR_TYPE_INVALID) {
			prev_match = curr_match;
			continue;
		}

		*uniform = 0;
		if (check_type_overlap(&prev_match, &curr_match))
			return curr_match;
	}

	if (prev_match != MTRR_TYPE_INVALID)
		return prev_match;

	return mtrr_state.def_type;
}
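
/*
 * Example of the repeat protocol above, with a hypothetical layout:
 * given a default type of UC and one WB MTRR covering [4MB:8MB), a
 * lookup for [2MB:6MB) returns UC with *partial_end = 4MB and
 * *repeat = 1; the caller is expected to look up [4MB:6MB) next,
 * which then matches the WB entry.
 */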

/**
 * mtrr_type_lookup - look up memory type in MTRR
 *
 * Return Values:
 * MTRR_TYPE_(type)  - The effective MTRR type for the region
 * MTRR_TYPE_INVALID - MTRR is disabled
 *
 * Output Argument:
 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
 *	     region is fully covered by a single MTRR entry or the default
 *	     type.
 */
u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
{
	u8 type, prev_type, is_uniform = 1, dummy;
	int repeat;
	u64 partial_end;

	if (!mtrr_state_set)
		return MTRR_TYPE_INVALID;

	if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
		return MTRR_TYPE_INVALID;

	/*
	 * Look up the fixed ranges first, which take priority over
	 * the variable ranges.
	 */
	if ((start < 0x100000) &&
	    (mtrr_state.have_fixed) &&
	    (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
		is_uniform = 0;
		type = mtrr_type_lookup_fixed(start, end);
		goto out;
	}

	/*
	 * Look up the variable ranges. Look for multiple ranges matching
	 * this address and pick the type as per MTRR precedence.
	 */
	type = mtrr_type_lookup_variable(start, end, &partial_end,
					 &repeat, &is_uniform);

	/*
	 * Common path is with repeat = 0.
	 * However, we can have cases where [start:end] spans across some
	 * MTRR ranges and/or the default type. Do repeated lookups for
	 * that case here.
	 */
	while (repeat) {
		prev_type = type;
		start = partial_end;
		is_uniform = 0;
		type = mtrr_type_lookup_variable(start, end, &partial_end,
						 &repeat, &dummy);

		if (check_type_overlap(&prev_type, &type))
			goto out;
	}

	if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2))
		type = MTRR_TYPE_WRBACK;

out:
	*uniform = is_uniform;
	return type;
}

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}

static void get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *)frs;
	int i;

	k8_check_syscfg_dram_mod_en();

	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}
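
/*
 * Layout note: frs points at the 88-byte fixed_ranges array, filled in
 * here as 32-bit words: p[0]/p[1] take the 64K MSR, p[2..5] the two
 * 16K MSRs, and p[6..21] the eight 4K MSRs; 11 MSRs of 8 type bytes
 * each.
 */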

void mtrr_save_fixed_ranges(void *info)
{
	if (boot_cpu_has(X86_FEATURE_MTRR))
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;

static void __init print_fixed_last(void)
{
	if (!last_fixed_end)
		return;

	pr_debug("  %05X-%05X %s\n", last_fixed_start,
		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

	last_fixed_end = 0;
}

static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}

static void __init
print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}

static void prepare_set(void);
static void post_set(void);

static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	pr_debug("MTRR default type: %s\n",
		 mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		pr_debug("MTRR fixed ranges %sabled:\n",
			 ((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
			  (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
			 "en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	pr_debug("MTRR variable ranges %sabled:\n",
		 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
	high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;

	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			pr_debug("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				 i,
				 high_width,
				 mtrr_state.var_ranges[i].base_hi,
				 mtrr_state.var_ranges[i].base_lo >> 12,
				 high_width,
				 mtrr_state.var_ranges[i].mask_hi,
				 mtrr_state.var_ranges[i].mask_lo >> 12,
				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			pr_debug("  %u disabled\n", i);
	}
	if (mtrr_tom2)
		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
}

/* PAT setup for BP. We need to go through sync steps here */
void __init mtrr_bp_pat_init(void)
{
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}

/* Grab all of the MTRR state for this CPU into *state */
bool __init get_mtrr_state(void)
{
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned int i;

	vrs = mtrr_state.var_ranges;

	rdmsr(MSR_MTRRcap, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MSR_MTRRdefType, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
}

/* Some BIOSes are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");

	pr_info("mtrr: probably your BIOS does not set up all CPUs.\n");
	pr_info("mtrr: corrected configuration.\n");
}

/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it because the best error handling is to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0) {
		pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
		       smp_processor_id(), msr, a, b);
	}
}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it
 *	differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int
generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int i, max;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;

	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}

	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	u32 mask_lo, mask_hi, base_lo, base_hi;
	unsigned int hi;
	u64 tmp, mask;

	/*
	 * get_mtrr doesn't need to update mtrr_state, and it can be
	 * called from any cpu, so print any warnings directly.
	 */
	get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls64(tmp);
	if (hi > 0) {
		tmp |= ~((1ULL<<(hi - 1)) - 1);

		if (tmp != mask) {
			pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
			mask = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask;
	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;

out_put_cpu:
	put_cpu();
}
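
/*
 * Sketch of the mask fixup above, assuming a 36-bit physical address
 * width: a well-formed 128MB mask reads back as tmp = 0xff8000 in page
 * units (bits 15..23). fls64() locates the top bit and the extension
 * sets every bit from there up, giving bits 15..63, which is exactly
 * size_or_mask | tmp, so the mask is left alone. A BIOS mask with a
 * hole below its top bit would extend to a different value, which then
 * replaces the bogus mask.
 */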

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
 *	differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *)frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges) {
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *)saved++);
	}

	return changed;
}

/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {

		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned long change_mask = 0;
	unsigned int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
	}

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value:
	 */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {

		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
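
/*
 * For reference, the MTRRdefType bit layout relied on above (per the
 * Intel SDM): bits 7:0 hold the default memory type, bit 10 is FE
 * (fixed-range enable) and bit 11 is E (global MTRR enable). That is
 * why 0xcff masks the writable fields and mtrr_state.enabled is a
 * 2-bit value shifted to bit 10.
 */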


static unsigned long cr4;
static DEFINE_RAW_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts,
 * they would run extremely slow and would only increase the pain.
 *
 * The caller must ensure that local interrupts are disabled and
 * are reenabled after post_set() has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*
	 * Note that this is not ideal
	 * since the cache is only flushed/disabled for this CPU while the
	 * MTRRs are changed, but changing this requires more invasive
	 * changes to the way the kernel boots.
	 */

	raw_spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);

	/*
	 * Cache flushing is the most time-consuming step when programming
	 * the MTRRs. Fortunately, as per the Intel Software Development
	 * Manual, we can skip it if the processor supports cache self-
	 * snooping.
	 */
	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
		wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4 = __read_cr4();
		__write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);

	/* Again, only flush caches if we have to. */
	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
		wbinvd();
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & ~X86_CR0_CD);

	/* Restore value of CR4 */
	if (boot_cpu_has(X86_FEATURE_PGE))
		__write_cr4(cr4);
	raw_spin_unlock(&set_atomicity_lock);
}
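
/*
 * Taken together, prepare_set()/post_set() implement the MTRR update
 * sequence recommended by the Intel SDM: with interrupts off (the
 * caller's job), enter no-fill cache mode via CR0.CD and flush, clear
 * CR4.PGE and flush the TLB, disable the MTRRs while rewriting them,
 * then undo each step in reverse order.
 */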

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof(mask) * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}

}

/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 *
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply
		 * clear the relevant mask register to disable a range.
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
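
/*
 * Encoding example with hypothetical values: base and size arrive in
 * page-frame units, so base = 0x800, size = 0x400 and type =
 * MTRR_TYPE_WRBACK (6) yield base_lo = 0x800000 | 6 = 0x800006 and
 * mask_lo = (-0x400 << PAGE_SHIFT) | 0x800 = 0xffc00800, where bit 11
 * is the valid bit.
 */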

int generic_validate_add_page(unsigned long base, unsigned long size,
			      unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7, the base address must be 4 MiB
	 * aligned and must not touch 0x70000000 -> 0x7003FFFF.
	 */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check that the upper bits of base and last are equal and that the
	 * lower bits are all 0 for base and all 1 for last.
	 */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
		return -EINVAL;
	}
	return 0;
}
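
/*
 * The alignment loop, traced with hypothetical values (in pages):
 * base = 0x100, size = 0x40 gives last = 0x13f; shifting both right
 * while base is even and last is odd ends with lbase == last == 0x4,
 * so the range is accepted. With size = 0x60 the loop stops at
 * lbase = 0x8, last = 0xa, and the request is rejected because 0x60
 * is not a power of two.
 */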

static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MSR_MTRRcap, config, dummy);
	return config & (1 << 10);
}

int positive_have_wrcomb(void)
{
	return 1;
}

/*
 * Generic structure...
 */
const struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if		= 1,
	.set_all		= generic_set_all,
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};