/*
 * MTRR (Memory Type Range Register) cleanup
 *
 * Copyright (C) 2009 Yinghai Lu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/kvm_para.h>
#include <linux/range.h>

#include <asm/processor.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"

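/*
 * Contents of one saved variable MTRR, with base and size kept as page
 * frame numbers:
 */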
struct var_mtrr_range_state {
        unsigned long base_pfn;
        unsigned long size_pfn;
        mtrr_type type;
};

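/*
 * Scratch state used while laying out new variable MTRRs: the pending WB
 * range being accumulated (start/size in KiB), the chunk and granule
 * sizes in KiB, and the next free MTRR register index:
 */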
struct var_mtrr_state {
        unsigned long range_startk;
        unsigned long range_sizek;
        unsigned long chunk_sizek;
        unsigned long gran_sizek;
        unsigned int reg;
};

/* Should be related to the number of variable MTRR ranges (MTRR_VAR_RANGES): */
#define RANGE_NUM 256

static struct range __initdata range[RANGE_NUM];
static int __initdata nr_range;

static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];

#define BIOS_BUG_MSG \
        "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"

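/*
 * Build the list of RAM ranges implied by the current variable MTRR
 * setup: merge in every WB range, then subtract every UC/WP range (plus
 * an optional extra range to remove), then sort.  UC entries below 1M
 * are clipped away with a BIOS-bug warning, since the fixed MTRRs own
 * that region.  Returns the new number of ranges:
 */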
static int __init
x86_get_mtrr_mem_range(struct range *range, int nr_range,
                       unsigned long extra_remove_base,
                       unsigned long extra_remove_size)
{
        unsigned long base, size;
        mtrr_type type;
        int i;

        for (i = 0; i < num_var_ranges; i++) {
                type = range_state[i].type;
                if (type != MTRR_TYPE_WRBACK)
                        continue;
                base = range_state[i].base_pfn;
                size = range_state[i].size_pfn;
                nr_range = add_range_with_merge(range, RANGE_NUM, nr_range,
                                                base, base + size);
        }

        Dprintk("After WB checking\n");
        for (i = 0; i < nr_range; i++)
                Dprintk("MTRR MAP PFN: %016llx - %016llx\n",
                        range[i].start, range[i].end);

        /* Take out UC ranges: */
        for (i = 0; i < num_var_ranges; i++) {
                type = range_state[i].type;
                if (type != MTRR_TYPE_UNCACHABLE &&
                    type != MTRR_TYPE_WRPROT)
                        continue;
                size = range_state[i].size_pfn;
                if (!size)
                        continue;
                base = range_state[i].base_pfn;
                if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed &&
                    (mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
                    (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
                        /* Var MTRR contains UC entry below 1M? Skip it: */
                        pr_warn(BIOS_BUG_MSG, i);
                        if (base + size <= (1<<(20-PAGE_SHIFT)))
                                continue;
                        size -= (1<<(20-PAGE_SHIFT)) - base;
                        base = 1<<(20-PAGE_SHIFT);
                }
                subtract_range(range, RANGE_NUM, base, base + size);
        }
        if (extra_remove_size)
                subtract_range(range, RANGE_NUM, extra_remove_base,
                               extra_remove_base + extra_remove_size);

        Dprintk("After UC checking\n");
        for (i = 0; i < RANGE_NUM; i++) {
                if (!range[i].end)
                        continue;

                Dprintk("MTRR MAP PFN: %016llx - %016llx\n",
                        range[i].start, range[i].end);
        }

        /* Sort the ranges: */
        nr_range = clean_sort_range(range, RANGE_NUM);

        Dprintk("After sorting\n");
        for (i = 0; i < nr_range; i++)
                Dprintk("MTRR MAP PFN: %016llx - %016llx\n",
                        range[i].start, range[i].end);

        return nr_range;
}

#ifdef CONFIG_MTRR_SANITIZER

static unsigned long __init sum_ranges(struct range *range, int nr_range)
{
        unsigned long sum = 0;
        int i;

        for (i = 0; i < nr_range; i++)
                sum += range[i].end - range[i].start;

        return sum;
}

static int enable_mtrr_cleanup __initdata =
        CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT;

static int __init disable_mtrr_cleanup_setup(char *str)
{
        enable_mtrr_cleanup = 0;
        return 0;
}
early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup);

static int __init enable_mtrr_cleanup_setup(char *str)
{
        enable_mtrr_cleanup = 1;
        return 0;
}
early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);

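/*
 * Program one variable MTRR from a base/size given in KiB.  Per the MTRR
 * architecture, the memory type sits in the low byte of the PHYSBASE MSR
 * and bit 11 (0x800) of the PHYSMASK MSR is the "valid" bit; the mask
 * selects a power-of-two sized region within the CPU's physical address
 * width:
 */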
static void __init
set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
             unsigned char type)
{
        u32 base_lo, base_hi, mask_lo, mask_hi;
        u64 base, mask;

        if (!sizek) {
                fill_mtrr_var_range(reg, 0, 0, 0, 0);
                return;
        }

        mask = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
        mask &= ~((((u64)sizek) << 10) - 1);

        base = ((u64)basek) << 10;

        base |= type;
        mask |= 0x800;

        base_lo = base & ((1ULL<<32) - 1);
        base_hi = base >> 32;

        mask_lo = mask & ((1ULL<<32) - 1);
        mask_hi = mask >> 32;

        fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi);
}

static void __init
save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
              unsigned char type)
{
        range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
        range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10);
        range_state[reg].type = type;
}

static void __init set_var_mtrr_all(void)
{
        unsigned long basek, sizek;
        unsigned char type;
        unsigned int reg;

        for (reg = 0; reg < num_var_ranges; reg++) {
                basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10);
                sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10);
                type = range_state[reg].type;

                set_var_mtrr(reg, basek, sizek, type);
        }
}

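/*
 * Scale a size in KiB to the largest whole unit, returning the scaled
 * value and the unit letter ('K', 'M' or 'G') via *factorp:
 */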
static unsigned long to_size_factor(unsigned long sizek, char *factorp)
{
        unsigned long base = sizek;
        char factor;

        if (base & ((1<<10) - 1)) {
                /* Not MB-aligned: */
                factor = 'K';
        } else if (base & ((1<<20) - 1)) {
                factor = 'M';
                base >>= 10;
        } else {
                factor = 'G';
                base >>= 20;
        }

        *factorp = factor;

        return base;
}

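/*
 * Split [range_startk, range_startk + range_sizek) into power-of-two
 * pieces, largest first, each naturally aligned, saving one variable
 * MTRR of the given type per piece.  E.g. a 1408M (1G + 256M + 128M)
 * range starting at 0 consumes three registers.  Returns the next free
 * register index:
 */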
static unsigned int __init
range_to_mtrr(unsigned int reg, unsigned long range_startk,
              unsigned long range_sizek, unsigned char type)
{
        if (!range_sizek || (reg >= num_var_ranges))
                return reg;

        while (range_sizek) {
                unsigned long max_align, align;
                unsigned long sizek;

                /* Compute the maximum size with which we can make a range: */
                if (range_startk)
                        max_align = __ffs(range_startk);
                else
                        max_align = BITS_PER_LONG - 1;

                align = __fls(range_sizek);
                if (align > max_align)
                        align = max_align;

                sizek = 1UL << align;
                if (mtrr_debug) {
                        char start_factor = 'K', size_factor = 'K';
                        unsigned long start_base, size_base;

                        start_base = to_size_factor(range_startk, &start_factor);
                        size_base = to_size_factor(sizek, &size_factor);

                        Dprintk("Setting variable MTRR %d, "
                                "base: %ld%cB, range: %ld%cB, type %s\n",
                                reg, start_base, start_factor,
                                size_base, size_factor,
                                (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
                                ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other")
                                );
                }
                save_var_mtrr(reg++, range_startk, sizek, type);
                range_startk += sizek;
                range_sizek -= sizek;
                if (reg >= num_var_ranges)
                        break;
        }
        return reg;
}

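/*
 * Flush the pending WB range in @state to MTRRs, optionally rounding it
 * up to the chunk size: "range0" is the chunk-rounded leading part,
 * "range" the WB leftover, and "hole" a UC entry that cancels the
 * overshoot from rounding up.  The return value (second_sizek) is how
 * much of the following range [basek, basek + sizek) was already
 * covered, so the caller can skip past it:
 */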
static unsigned __init
range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
                        unsigned long sizek)
{
        unsigned long hole_basek, hole_sizek;
        unsigned long second_sizek;
        unsigned long range0_basek, range0_sizek;
        unsigned long range_basek, range_sizek;
        unsigned long chunk_sizek;
        unsigned long gran_sizek;

        hole_basek = 0;
        hole_sizek = 0;
        second_sizek = 0;
        chunk_sizek = state->chunk_sizek;
        gran_sizek = state->gran_sizek;

        /* Align to the granule size, to prevent small blocks from using up MTRRs: */
        range_basek = ALIGN(state->range_startk, gran_sizek);
        if ((range_basek > basek) && basek)
                return second_sizek;

        state->range_sizek -= (range_basek - state->range_startk);
        range_sizek = ALIGN(state->range_sizek, gran_sizek);

        while (range_sizek > state->range_sizek) {
                range_sizek -= gran_sizek;
                if (!range_sizek)
                        return 0;
        }
        state->range_sizek = range_sizek;

        /* Try to append some small hole: */
        range0_basek = state->range_startk;
        range0_sizek = ALIGN(state->range_sizek, chunk_sizek);

        /* No increase: */
        if (range0_sizek == state->range_sizek) {
                Dprintk("rangeX: %016lx - %016lx\n",
                        range0_basek<<10,
                        (range0_basek + state->range_sizek)<<10);
                state->reg = range_to_mtrr(state->reg, range0_basek,
                                           state->range_sizek, MTRR_TYPE_WRBACK);
                return 0;
        }

        /* Only cut back when it is not the last: */
        if (sizek) {
                while (range0_basek + range0_sizek > (basek + sizek)) {
                        if (range0_sizek >= chunk_sizek)
                                range0_sizek -= chunk_sizek;
                        else
                                range0_sizek = 0;

                        if (!range0_sizek)
                                break;
                }
        }

second_try:
        range_basek = range0_basek + range0_sizek;

        /* One hole in the middle: */
        if (range_basek > basek && range_basek <= (basek + sizek))
                second_sizek = range_basek - basek;

        if (range0_sizek > state->range_sizek) {

                /* One hole in the middle or at the end: */
                hole_sizek = range0_sizek - state->range_sizek - second_sizek;

                /* Hole size should be less than half of range0 size: */
                if (hole_sizek >= (range0_sizek >> 1) &&
                    range0_sizek >= chunk_sizek) {
                        range0_sizek -= chunk_sizek;
                        second_sizek = 0;
                        hole_sizek = 0;

                        goto second_try;
                }
        }

        if (range0_sizek) {
                Dprintk("range0: %016lx - %016lx\n",
                        range0_basek<<10,
                        (range0_basek + range0_sizek)<<10);
                state->reg = range_to_mtrr(state->reg, range0_basek,
                                           range0_sizek, MTRR_TYPE_WRBACK);
        }

        if (range0_sizek < state->range_sizek) {
                /* Need to handle the leftover range: */
                range_sizek = state->range_sizek - range0_sizek;

                Dprintk("range: %016lx - %016lx\n",
                        range_basek<<10,
                        (range_basek + range_sizek)<<10);

                state->reg = range_to_mtrr(state->reg, range_basek,
                                           range_sizek, MTRR_TYPE_WRBACK);
        }

        if (hole_sizek) {
                hole_basek = range_basek - hole_sizek - second_sizek;
                Dprintk("hole: %016lx - %016lx\n",
                        hole_basek<<10,
                        (hole_basek + hole_sizek)<<10);
                state->reg = range_to_mtrr(state->reg, hole_basek,
                                           hole_sizek, MTRR_TYPE_UNCACHABLE);
        }

        return second_sizek;
}

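/*
 * Feed one RAM range (in pages) into the accumulator: merge it into the
 * pending range when it is contiguous (or still below 1M), otherwise
 * flush the pending range to MTRRs and start accumulating a new one:
 */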
static void __init
set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
                   unsigned long size_pfn)
{
        unsigned long basek, sizek;
        unsigned long second_sizek = 0;

        if (state->reg >= num_var_ranges)
                return;

        basek = base_pfn << (PAGE_SHIFT - 10);
        sizek = size_pfn << (PAGE_SHIFT - 10);

        /* See if I can merge with the last range: */
        if ((basek <= 1024) ||
            (state->range_startk + state->range_sizek == basek)) {
                unsigned long endk = basek + sizek;
                state->range_sizek = endk - state->range_startk;
                return;
        }
        /* Write the range mtrrs: */
        if (state->range_sizek != 0)
                second_sizek = range_to_mtrr_with_hole(state, basek, sizek);

        /* Allocate an msr: */
        state->range_startk = basek + second_sizek;
        state->range_sizek = sizek - second_sizek;
}

/* Minimum size of an mtrr block that can take a hole: */
static u64 mtrr_chunk_size __initdata = (256ULL<<20);

static int __init parse_mtrr_chunk_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        mtrr_chunk_size = memparse(p, &p);
        return 0;
}
early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);

/* Granularity of an mtrr block: */
static u64 mtrr_gran_size __initdata;

static int __init parse_mtrr_gran_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        mtrr_gran_size = memparse(p, &p);
        return 0;
}
early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);

static unsigned long nr_mtrr_spare_reg __initdata =
        CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT;

static int __init parse_mtrr_spare_reg(char *arg)
{
        if (arg)
                nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0);
        return 0;
}
early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);

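/*
 * Convert the sorted RAM range list into a full variable-MTRR layout in
 * range_state[], using the given chunk and granule sizes, and zero any
 * registers left over.  Returns the number of registers used:
 */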
static int __init
x86_setup_var_mtrrs(struct range *range, int nr_range,
                    u64 chunk_size, u64 gran_size)
{
        struct var_mtrr_state var_state;
        int num_reg;
        int i;

        var_state.range_startk = 0;
        var_state.range_sizek = 0;
        var_state.reg = 0;
        var_state.chunk_sizek = chunk_size >> 10;
        var_state.gran_sizek = gran_size >> 10;

        memset(range_state, 0, sizeof(range_state));

        /* Write the range: */
        for (i = 0; i < nr_range; i++) {
                set_var_mtrr_range(&var_state, range[i].start,
                                   range[i].end - range[i].start);
        }

        /* Write the last range: */
        if (var_state.range_sizek != 0)
                range_to_mtrr_with_hole(&var_state, 0, 0);

        num_reg = var_state.reg;
        /* Clear out the extra MTRRs: */
        while (var_state.reg < num_var_ranges) {
                save_var_mtrr(var_state.reg, 0, 0, 0);
                var_state.reg++;
        }

        return num_reg;
}

struct mtrr_cleanup_result {
        unsigned long gran_sizek;
        unsigned long chunk_sizek;
        unsigned long lose_cover_sizek;
        unsigned int num_reg;
        int bad;
};

/*
 * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G (16 values)
 * chunk_size: gran_size, ..., 2G
 * so we need 16 + 15 + ... + 1 = (1 + 16) * 16 / 2 = (1+16)*8 = 136 results
 */
#define NUM_RESULT 136
#define PSHIFT (PAGE_SHIFT - 10)

static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
static unsigned long __initdata min_loss_pfn[RANGE_NUM];

static void __init print_out_mtrr_range_state(void)
{
        char start_factor = 'K', size_factor = 'K';
        unsigned long start_base, size_base;
        mtrr_type type;
        int i;

        for (i = 0; i < num_var_ranges; i++) {

                size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
                if (!size_base)
                        continue;

                size_base = to_size_factor(size_base, &size_factor);
                start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
                start_base = to_size_factor(start_base, &start_factor);
                type = range_state[i].type;

                Dprintk("reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
                        i, start_base, start_factor,
                        size_base, size_factor,
                        (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
                        ((type == MTRR_TYPE_WRPROT) ? "WP" :
                        ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
                        );
        }
}

static int __init mtrr_need_cleanup(void)
{
        int i;
        mtrr_type type;
        unsigned long size;
        /* Extra one for all 0: */
        int num[MTRR_NUM_TYPES + 1];

        /* Count the entries of each type: */
        memset(num, 0, sizeof(num));
        for (i = 0; i < num_var_ranges; i++) {
                type = range_state[i].type;
                size = range_state[i].size_pfn;
                if (type >= MTRR_NUM_TYPES)
                        continue;
                if (!size)
                        type = MTRR_NUM_TYPES;
                num[type]++;
        }

        /* Check if we got UC entries: */
        if (!num[MTRR_TYPE_UNCACHABLE])
                return 0;

        /* Check if we only had WB and UC: */
        if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
            num_var_ranges - num[MTRR_NUM_TYPES])
                return 0;

        return 1;
}

static unsigned long __initdata range_sums;

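/*
 * Evaluate one (chunk_size, gran_size) candidate: lay out range_state[]
 * for it, recompute the RAM coverage that layout yields, and record the
 * register count and any lost (or bogus extra) coverage in result[i]:
 */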
static void __init
mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
                      unsigned long x_remove_base,
                      unsigned long x_remove_size, int i)
{
        /*
         * range_new should really be an automatic variable, but
         * putting 4096 bytes on the stack is frowned upon, to put it
         * mildly. It is safe to make it a static __initdata variable,
         * since mtrr_calc_range_state is only called during init and
         * there's no way it will call itself recursively.
         */
        static struct range range_new[RANGE_NUM] __initdata;
        unsigned long range_sums_new;
        int nr_range_new;
        int num_reg;

        /* Convert ranges to var ranges state: */
        num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);

        /* We got new setting in range_state, check it: */
        memset(range_new, 0, sizeof(range_new));
        nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
                                              x_remove_base, x_remove_size);
        range_sums_new = sum_ranges(range_new, nr_range_new);

        result[i].chunk_sizek = chunk_size >> 10;
        result[i].gran_sizek = gran_size >> 10;
        result[i].num_reg = num_reg;

        if (range_sums < range_sums_new) {
                result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT;
                result[i].bad = 1;
        } else {
                result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT;
        }

        /* Double check it: */
        if (!result[i].bad && !result[i].lose_cover_sizek) {
                if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range)))
                        result[i].bad = 1;
        }

        if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg]))
                min_loss_pfn[num_reg] = range_sums - range_sums_new;
}

static void __init mtrr_print_out_one_result(int i)
{
        unsigned long gran_base, chunk_base, lose_base;
        char gran_factor, chunk_factor, lose_factor;

        gran_base = to_size_factor(result[i].gran_sizek, &gran_factor);
        chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor);
        lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor);

        pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t",
                result[i].bad ? "*BAD*" : " ",
                gran_base, gran_factor, chunk_base, chunk_factor);
        pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n",
                result[i].num_reg, result[i].bad ? "-" : "",
                lose_base, lose_factor);
}

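/*
 * Pick the candidate that loses no coverage with the fewest registers,
 * while leaving at least nr_mtrr_spare_reg registers unused.  Returns an
 * index into result[], or -1 if no candidate qualifies:
 */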
static int __init mtrr_search_optimal_index(void)
{
        int num_reg_good;
        int index_good;
        int i;

        if (nr_mtrr_spare_reg >= num_var_ranges)
                nr_mtrr_spare_reg = num_var_ranges - 1;

        num_reg_good = -1;
        for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
                if (!min_loss_pfn[i])
                        num_reg_good = i;
        }

        index_good = -1;
        if (num_reg_good != -1) {
                for (i = 0; i < NUM_RESULT; i++) {
                        if (!result[i].bad &&
                            result[i].num_reg == num_reg_good &&
                            !result[i].lose_cover_sizek) {
                                index_good = i;
                                break;
                        }
                }
        }

        return index_good;
}

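/*
 * Main entry point: re-pack the variable MTRRs into an equivalent but
 * tidier WB+UC layout, either with the user-supplied sizes (e.g. booting
 * with "mtrr_chunk_size=256M mtrr_gran_size=64M") or by searching all
 * chunk/granule combinations for an optimal one.  Returns 1 if the MTRRs
 * were changed, 0 otherwise:
 */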
int __init mtrr_cleanup(void)
{
        unsigned long x_remove_base, x_remove_size;
        unsigned long base, size, def, dummy;
        u64 chunk_size, gran_size;
        mtrr_type type;
        int index_good;
        int i;

        if (!mtrr_enabled())
                return 0;

        if (!cpu_feature_enabled(X86_FEATURE_MTRR) || enable_mtrr_cleanup < 1)
                return 0;

        rdmsr(MSR_MTRRdefType, def, dummy);
        def &= MTRR_DEF_TYPE_TYPE;
        if (def != MTRR_TYPE_UNCACHABLE)
                return 0;

        /* Get it and store it aside: */
        memset(range_state, 0, sizeof(range_state));
        for (i = 0; i < num_var_ranges; i++) {
                mtrr_if->get(i, &base, &size, &type);
                range_state[i].base_pfn = base;
                range_state[i].size_pfn = size;
                range_state[i].type = type;
        }

        /* Check if we need to handle it and can handle it: */
        if (!mtrr_need_cleanup())
                return 0;

        /* Print the original var MTRRs first, for debugging: */
        Dprintk("original variable MTRRs\n");
        print_out_mtrr_range_state();

        memset(range, 0, sizeof(range));
        x_remove_size = 0;
        x_remove_base = 1 << (32 - PAGE_SHIFT);
        if (mtrr_tom2)
                x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;

        /*
         * [0, 1M) should always be covered by a WB var MTRR, and the
         * fixed MTRRs take effect before the var MTRRs there anyway:
         */
        nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
                                        1ULL<<(20 - PAGE_SHIFT));
        /* Add the ranges from the var MTRRs last: */
        nr_range = x86_get_mtrr_mem_range(range, nr_range,
                                          x_remove_base, x_remove_size);

        range_sums = sum_ranges(range, nr_range);
        pr_info("total RAM covered: %ldM\n",
                range_sums >> (20 - PAGE_SHIFT));

        if (mtrr_chunk_size && mtrr_gran_size) {
                i = 0;
                mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size,
                                      x_remove_base, x_remove_size, i);

                mtrr_print_out_one_result(i);

                if (!result[i].bad) {
                        set_var_mtrr_all();
                        Dprintk("New variable MTRRs\n");
                        print_out_mtrr_range_state();
                        return 1;
                }
                pr_info("invalid mtrr_gran_size or mtrr_chunk_size, will find optimal one\n");
        }

        i = 0;
        memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
        memset(result, 0, sizeof(result));
        for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {

                for (chunk_size = gran_size; chunk_size < (1ULL<<32);
                     chunk_size <<= 1) {

                        if (i >= NUM_RESULT)
                                continue;

                        mtrr_calc_range_state(chunk_size, gran_size,
                                              x_remove_base, x_remove_size, i);
                        if (mtrr_debug) {
                                mtrr_print_out_one_result(i);
                                pr_info("\n");
                        }

                        i++;
                }
        }

        /* Try to find the optimal index: */
        index_good = mtrr_search_optimal_index();

        if (index_good != -1) {
                pr_info("Found optimal setting for mtrr clean up\n");
                i = index_good;
                mtrr_print_out_one_result(i);

                /* Convert ranges to var ranges state: */
                chunk_size = result[i].chunk_sizek;
                chunk_size <<= 10;
                gran_size = result[i].gran_sizek;
                gran_size <<= 10;
                x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
                set_var_mtrr_all();
                Dprintk("New variable MTRRs\n");
                print_out_mtrr_range_state();
                return 1;
        } else {
                /* Print out all results: */
                for (i = 0; i < NUM_RESULT; i++)
                        mtrr_print_out_one_result(i);
        }

        pr_info("mtrr_cleanup: can not find optimal value\n");
        pr_info("please specify mtrr_gran_size/mtrr_chunk_size\n");

        return 0;
}
#else
int __init mtrr_cleanup(void)
{
        return 0;
}
#endif

static int disable_mtrr_trim;

static int __init disable_mtrr_trim_setup(char *str)
{
        disable_mtrr_trim = 1;
        return 0;
}
early_param("disable_mtrr_trim", disable_mtrr_trim_setup);

/*
 * Newer AMD K8s and later CPUs have a special magic MSR bit to force WB
 * for memory >4GB. Check for that here.
 * Note this won't check whether the MTRRs < 4GB (where the magic bit
 * doesn't apply) are wrong, but so far we don't know of any such case
 * in the wild.
 */
#define Tom2Enabled             (1U << 21)
#define Tom2ForceMemTypeWB      (1U << 22)

int __init amd_special_default_mtrr(void)
{
        u32 l, h;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return 0;
        if (boot_cpu_data.x86 < 0xf)
                return 0;
        /* In case some hypervisor doesn't pass SYSCFG through: */
        if (rdmsr_safe(MSR_AMD64_SYSCFG, &l, &h) < 0)
                return 0;
        /*
         * Memory between 4GB and top of mem is forced WB by this magic bit.
         * Reserved before K8RevF, but should be zero there.
         */
        if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
            (Tom2Enabled | Tom2ForceMemTypeWB))
                return 1;
        return 0;
}

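/*
 * Convert a PFN range to bytes and flip it from RAM to reserved in the
 * e820 map, returning the amount actually updated:
 */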
static u64 __init
real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn)
{
        u64 trim_start, trim_size;

        trim_start = start_pfn;
        trim_start <<= PAGE_SHIFT;

        trim_size = limit_pfn;
        trim_size <<= PAGE_SHIFT;
        trim_size -= trim_start;

        return e820__range_update(trim_start, trim_size, E820_TYPE_RAM, E820_TYPE_RESERVED);
}

/**
 * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
 * @end_pfn: ending page frame number
 *
 * Some buggy BIOSes don't set up the MTRRs properly for systems with certain
 * memory configurations. This routine checks that the highest MTRR matches
 * the end of memory, to make sure the MTRRs having a write back type cover
 * all of the memory the kernel is intending to use. If not, it'll trim any
 * memory off the end by adjusting end_pfn, removing it from the kernel's
 * allocation pools, and warning the user with an obnoxious message.
 */
int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
{
        unsigned long i, base, size, highest_pfn = 0, def, dummy;
        mtrr_type type;
        u64 total_trim_size;
        /* Extra one for all 0: */
        int num[MTRR_NUM_TYPES + 1];

        if (!mtrr_enabled())
                return 0;

        /*
         * Make sure we only trim uncachable memory on machines that
         * support the Intel MTRR architecture:
         */
        if (!cpu_feature_enabled(X86_FEATURE_MTRR) || disable_mtrr_trim)
                return 0;

        rdmsr(MSR_MTRRdefType, def, dummy);
        def &= MTRR_DEF_TYPE_TYPE;
        if (def != MTRR_TYPE_UNCACHABLE)
                return 0;

        /* Get it and store it aside: */
        memset(range_state, 0, sizeof(range_state));
        for (i = 0; i < num_var_ranges; i++) {
                mtrr_if->get(i, &base, &size, &type);
                range_state[i].base_pfn = base;
                range_state[i].size_pfn = size;
                range_state[i].type = type;
        }

        /* Find the highest cached pfn: */
        for (i = 0; i < num_var_ranges; i++) {
                type = range_state[i].type;
                if (type != MTRR_TYPE_WRBACK)
                        continue;
                base = range_state[i].base_pfn;
                size = range_state[i].size_pfn;
                if (highest_pfn < base + size)
                        highest_pfn = base + size;
        }

        /* kvm/qemu doesn't set the MTRRs up properly; don't trim everything away: */
        if (!highest_pfn) {
                pr_info("CPU MTRRs all blank - virtualized system.\n");
                return 0;
        }

        /* Count the entries of each type: */
        memset(num, 0, sizeof(num));
        for (i = 0; i < num_var_ranges; i++) {
                type = range_state[i].type;
                if (type >= MTRR_NUM_TYPES)
                        continue;
                size = range_state[i].size_pfn;
                if (!size)
                        type = MTRR_NUM_TYPES;
                num[type]++;
        }

        /* No entry for WB? */
        if (!num[MTRR_TYPE_WRBACK])
                return 0;

        /* Check if we only had WB and UC: */
        if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
            num_var_ranges - num[MTRR_NUM_TYPES])
                return 0;

        memset(range, 0, sizeof(range));
        nr_range = 0;
        if (mtrr_tom2) {
                range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
                range[nr_range].end = mtrr_tom2 >> PAGE_SHIFT;
                if (highest_pfn < range[nr_range].end)
                        highest_pfn = range[nr_range].end;
                nr_range++;
        }
        nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);

        /* Check the head: */
        total_trim_size = 0;
        if (range[0].start)
                total_trim_size += real_trim_memory(0, range[0].start);

        /* Check the holes: */
        for (i = 0; i < nr_range - 1; i++) {
                if (range[i].end < range[i+1].start)
                        total_trim_size += real_trim_memory(range[i].end,
                                                            range[i+1].start);
        }

        /* Check the top: */
        i = nr_range - 1;
        if (range[i].end < end_pfn)
                total_trim_size += real_trim_memory(range[i].end,
                                                    end_pfn);

        if (total_trim_size) {
                pr_warn("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n",
                        total_trim_size >> 20);

                if (!changed_by_mtrr_cleanup)
                        WARN_ON(1);

                pr_info("update e820 for mtrr\n");
                e820__update_table_print();

                return 1;
        }

        return 0;
}