v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * vMTRR implementation
  4 *
  5 * Copyright (C) 2006 Qumranet, Inc.
  6 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  7 * Copyright(C) 2015 Intel Corporation.
  8 *
  9 * Authors:
 10 *   Yaniv Kamay  <yaniv@qumranet.com>
 11 *   Avi Kivity   <avi@qumranet.com>
 12 *   Marcelo Tosatti <mtosatti@redhat.com>
 13 *   Paolo Bonzini <pbonzini@redhat.com>
 14 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 15 */
 16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 17
 18#include <linux/kvm_host.h>
 19#include <asm/mtrr.h>
 20
 21#include "cpuid.h"
 22#include "x86.h"
 23
 24static u64 *find_mtrr(struct kvm_vcpu *vcpu, unsigned int msr)
 25{
 26	int index;
 27
 28	switch (msr) {
 29	case MTRRphysBase_MSR(0) ... MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1):
 30		index = msr - MTRRphysBase_MSR(0);
 31		return &vcpu->arch.mtrr_state.var[index];
 32	case MSR_MTRRfix64K_00000:
 33		return &vcpu->arch.mtrr_state.fixed_64k;
 34	case MSR_MTRRfix16K_80000:
 35	case MSR_MTRRfix16K_A0000:
 36		index = msr - MSR_MTRRfix16K_80000;
 37		return &vcpu->arch.mtrr_state.fixed_16k[index];
 38	case MSR_MTRRfix4K_C0000:
 39	case MSR_MTRRfix4K_C8000:
 40	case MSR_MTRRfix4K_D0000:
 41	case MSR_MTRRfix4K_D8000:
 42	case MSR_MTRRfix4K_E0000:
 43	case MSR_MTRRfix4K_E8000:
 44	case MSR_MTRRfix4K_F0000:
 45	case MSR_MTRRfix4K_F8000:
 46		index = msr - MSR_MTRRfix4K_C0000;
 47		return &vcpu->arch.mtrr_state.fixed_4k[index];
 48	case MSR_MTRRdefType:
 49		return &vcpu->arch.mtrr_state.deftype;
 50	default:
 51		break;
 52	}
 53	return NULL;
 54}
 55
 56static bool valid_mtrr_type(unsigned t)
 57{
 58	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
 59}
 60
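As an aside, the 0x73 bitmap used by valid_mtrr_type() encodes exactly the architecturally valid MTRR memory types: UC (0), WC (1), WT (4), WP (5) and WB (6). A minimal stand-alone sketch, not part of the kernel file and with an illustrative name, that demonstrates the check:

#include <assert.h>

/* mirrors valid_mtrr_type(): 0x73 == 0b01110011, i.e. bits 0, 1, 4, 5, 6 */
static int sketch_valid_mtrr_type(unsigned int t)
{
	return t < 8 && ((1u << t) & 0x73);
}

int main(void)
{
	assert(sketch_valid_mtrr_type(0));	/* UC */
	assert(sketch_valid_mtrr_type(1));	/* WC */
	assert(!sketch_valid_mtrr_type(2));	/* reserved */
	assert(!sketch_valid_mtrr_type(3));	/* reserved */
	assert(sketch_valid_mtrr_type(4));	/* WT */
	assert(sketch_valid_mtrr_type(5));	/* WP */
	assert(sketch_valid_mtrr_type(6));	/* WB */
	assert(!sketch_valid_mtrr_type(7));	/* reserved */
	return 0;
}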
 61static bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 62{
 63	int i;
 64	u64 mask;
 65
 66	if (msr == MSR_MTRRdefType) {
 67		if (data & ~0xcff)
 68			return false;
 69		return valid_mtrr_type(data & 0xff);
 70	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
 71		for (i = 0; i < 8 ; i++)
 72			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
 73				return false;
 74		return true;
 75	}
 76
 77	/* variable MTRRs */
 78	if (WARN_ON_ONCE(!(msr >= MTRRphysBase_MSR(0) &&
 79			   msr <= MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1))))
 80		return false;
 81
 82	mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
 83	if ((msr & 1) == 0) {
 84		/* MTRR base */
 85		if (!valid_mtrr_type(data & 0xff))
 86			return false;
 87		mask |= 0xf00;
 88	} else {
 89		/* MTRR mask */
 90		mask |= 0x7ff;
 91	}
 92
 93	return (data & mask) == 0;
 94}
 95
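To make the reserved-bit checks in kvm_mtrr_valid() concrete, here is an illustrative sketch (not kernel code) that assumes a guest MAXPHYADDR of 36 bits, in which case kvm_vcpu_reserved_gpa_bits_raw() amounts to the mask of bits 63:36. The helper name and the fixed MAXPHYADDR are assumptions made only for the example.

/*
 * A PHYSBASE write must keep bits 63:36 and 11:8 clear (mask | 0xf00)
 * and carry a valid memory type in bits 7:0; a PHYSMASK write must keep
 * bits 63:36 and 10:0 clear (mask | 0x7ff), leaving only address bits
 * and the valid bit 11.
 */
static int sketch_var_mtrr_write_ok(unsigned long long data, int is_mask)
{
	unsigned long long rsvd = ~0ULL << 36;	/* assumed MAXPHYADDR == 36 */
	unsigned int type = data & 0xff;

	if (!is_mask && !(type < 8 && ((1u << type) & 0x73)))
		return 0;			/* base needs a valid memory type */

	rsvd |= is_mask ? 0x7ff : 0xf00;
	return (data & rsvd) == 0;
}

For example, writing 0xC0000006 to a PHYSBASE MSR passes (type WB, no reserved bits set), while the same value written to a PHYSMASK MSR fails because bits 10:0 are reserved there.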
 96int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 97{
 98	u64 *mtrr;
 99
100	mtrr = find_mtrr(vcpu, msr);
101	if (!mtrr)
102		return 1;
103
104	if (!kvm_mtrr_valid(vcpu, msr, data))
105		return 1;
106
107	*mtrr = data;
108	return 0;
109}
110
111int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
112{
113	u64 *mtrr;
114
115	/* MSR_MTRRcap is a readonly MSR. */
116	if (msr == MSR_MTRRcap) {
117		/*
118		 * SMRR = 0
119		 * WC = 1
120		 * FIX = 1
121		 * VCNT = KVM_NR_VAR_MTRR
122		 */
123		*pdata = 0x500 | KVM_NR_VAR_MTRR;
124		return 0;
125	}
126
127	mtrr = find_mtrr(vcpu, msr);
128	if (!mtrr)
129		return 1;
130
131	*pdata = *mtrr;
132	return 0;
133}
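Aside: the 0x500 | KVM_NR_VAR_MTRR value reported for MSR_MTRRcap above (and in the older version below) follows the architectural layout of that register: VCNT in bits 7:0, FIX in bit 8, WC in bit 10 and SMRR in bit 11. A small illustrative sketch with made-up macro names:

#define SKETCH_MTRRCAP_FIX	(1ULL << 8)	/* fixed-range MTRRs supported */
#define SKETCH_MTRRCAP_WC	(1ULL << 10)	/* write-combining supported */
#define SKETCH_MTRRCAP_SMRR	(1ULL << 11)	/* SMRR interface, left clear here */

static unsigned long long sketch_mtrrcap(unsigned int vcnt)
{
	/* 0x500 == SKETCH_MTRRCAP_WC | SKETCH_MTRRCAP_FIX */
	return SKETCH_MTRRCAP_WC | SKETCH_MTRRCAP_FIX | (vcnt & 0xff);
}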
v4.6
  1/*
  2 * vMTRR implementation
  3 *
  4 * Copyright (C) 2006 Qumranet, Inc.
  5 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  6 * Copyright(C) 2015 Intel Corporation.
  7 *
  8 * Authors:
  9 *   Yaniv Kamay  <yaniv@qumranet.com>
 10 *   Avi Kivity   <avi@qumranet.com>
 11 *   Marcelo Tosatti <mtosatti@redhat.com>
 12 *   Paolo Bonzini <pbonzini@redhat.com>
 13 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 14 *
 15 * This work is licensed under the terms of the GNU GPL, version 2.  See
 16 * the COPYING file in the top-level directory.
 17 */
 18
 19#include <linux/kvm_host.h>
 20#include <asm/mtrr.h>
 21
 22#include "cpuid.h"
 23#include "mmu.h"
 24
 25#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
 26#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
 27#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)
 28
 29static bool msr_mtrr_valid(unsigned msr)
 30{
 31	switch (msr) {
 32	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
 33	case MSR_MTRRfix64K_00000:
 34	case MSR_MTRRfix16K_80000:
 35	case MSR_MTRRfix16K_A0000:
 36	case MSR_MTRRfix4K_C0000:
 37	case MSR_MTRRfix4K_C8000:
 38	case MSR_MTRRfix4K_D0000:
 39	case MSR_MTRRfix4K_D8000:
 40	case MSR_MTRRfix4K_E0000:
 41	case MSR_MTRRfix4K_E8000:
 42	case MSR_MTRRfix4K_F0000:
 43	case MSR_MTRRfix4K_F8000:
 44	case MSR_MTRRdefType:
 45	case MSR_IA32_CR_PAT:
 46		return true;
 47	case 0x2f8:
 48		return true;
 49	}
 50	return false;
 51}
 52
 53static bool valid_pat_type(unsigned t)
 54{
 55	return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
 56}
 57
 58static bool valid_mtrr_type(unsigned t)
 59{
 60	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
 61}
 62
 63bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 64{
 65	int i;
 66	u64 mask;
 67
 68	if (!msr_mtrr_valid(msr))
 69		return false;
 70
 71	if (msr == MSR_IA32_CR_PAT) {
 72		for (i = 0; i < 8; i++)
 73			if (!valid_pat_type((data >> (i * 8)) & 0xff))
 74				return false;
 75		return true;
 76	} else if (msr == MSR_MTRRdefType) {
 77		if (data & ~0xcff)
 78			return false;
 79		return valid_mtrr_type(data & 0xff);
 80	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
 81		for (i = 0; i < 8 ; i++)
 82			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
 83				return false;
 84		return true;
 85	}
 86
 87	/* variable MTRRs */
 88	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));
 89
 90	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
 91	if ((msr & 1) == 0) {
 92		/* MTRR base */
 93		if (!valid_mtrr_type(data & 0xff))
 94			return false;
 95		mask |= 0xf00;
 96	} else
 97		/* MTRR mask */
 98		mask |= 0x7ff;
 99	if (data & mask) {
100		kvm_inject_gp(vcpu, 0);
101		return false;
102	}
103
104	return true;
105}
106EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
107
108static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
109{
110	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
111}
112
113static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
114{
115	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
116}
117
118static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
119{
120	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
121}
122
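For illustration, the three helpers above decompose IA32_MTRR_DEF_TYPE exactly as the #defines near the top of the file describe. A stand-alone sketch with an example value (the function name is made up):

static void sketch_deftype_example(void)
{
	unsigned long long deftype = 0xc06;	/* E=1 (bit 11), FE=1 (bit 10), type 6 */

	int mtrrs_enabled = !!(deftype & (1ULL << 11));	/* 1: MTRRs enabled */
	int fixed_enabled = !!(deftype & (1ULL << 10));	/* 1: fixed MTRRs enabled */
	int default_type  = deftype & 0xff;		/* 6 == MTRR_TYPE_WRBACK */

	(void)mtrrs_enabled; (void)fixed_enabled; (void)default_type;
}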
123static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
124{
125	/*
126	 * Intel SDM 11.11.2.2: all MTRRs are disabled when
127	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
128	 * memory type is applied to all of physical memory.
129	 *
130	 * However, virtual machines can be run with CPUID such that
131	 * there are no MTRRs.  In that case, the firmware will never
132	 * enable MTRRs, and it is obviously undesirable to run the
133	 * guest entirely with UC memory, so we use WB instead.
134	 */
135	if (guest_cpuid_has_mtrr(vcpu))
136		return MTRR_TYPE_UNCACHABLE;
137	else
138		return MTRR_TYPE_WRBACK;
139}
140
141/*
142 * Three terms are used in the following code:
143 * - segment: an address region covered by one group of fixed MTRRs.
144 * - unit: one MSR entry within a segment.
145 * - range: an area that is mapped by a single memory cache type.
146 */
147struct fixed_mtrr_segment {
148	u64 start;
149	u64 end;
150
151	int range_shift;
152
153	/* the start position in kvm_mtrr.fixed_ranges[]. */
154	int range_start;
155};
156
157static struct fixed_mtrr_segment fixed_seg_table[] = {
158	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
159	{
160		.start = 0x0,
161		.end = 0x80000,
162		.range_shift = 16, /* 64K */
163		.range_start = 0,
164	},
165
166	/*
167	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
168	 * 16K fixed mtrr.
169	 */
170	{
171		.start = 0x80000,
172		.end = 0xc0000,
173		.range_shift = 14, /* 16K */
174		.range_start = 8,
175	},
176
177	/*
178	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
179	 * 4K fixed mtrr.
180	 */
181	{
182		.start = 0xc0000,
183		.end = 0x100000,
184		.range_shift = 12, /* 4K */
185		.range_start = 24,
186	}
187};
188
189/*
190 * One MSR covers exactly one unit; each MSR entry holds 8 ranges,
191 * so the unit size is always 8 * 2^range_shift.
192 */
193static u64 fixed_mtrr_seg_unit_size(int seg)
194{
195	return 8 << fixed_seg_table[seg].range_shift;
196}
197
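A worked example of the segment/unit/range terminology and of fixed_mtrr_seg_unit_size(), not part of the kernel file: MSR_MTRRfix16K_A0000 is unit 1 of segment 1 in fixed_seg_table[], so it covers guest physical addresses 0xA0000..0xC0000 and its first range index in kvm_mtrr.fixed_ranges[] is 16.

static void sketch_fix16k_a0000(void)
{
	/* segment 1: start 0x80000, range_shift 14 (16K), range_start 8 */
	unsigned long long unit_size = 8ULL << 14;		/* 8 * 16K = 128K */
	unsigned long long start = 0x80000 + 1 * unit_size;	/* unit 1 -> 0xA0000 */
	unsigned long long end = start + unit_size;		/* 0xC0000 */
	int first_range = 8 + 8 * 1;				/* fixed_ranges[16] */

	(void)end; (void)first_range;
}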
198static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
199{
200	switch (msr) {
201	case MSR_MTRRfix64K_00000:
202		*seg = 0;
203		*unit = 0;
204		break;
205	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
206		*seg = 1;
207		*unit = msr - MSR_MTRRfix16K_80000;
208		break;
209	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
210		*seg = 2;
211		*unit = msr - MSR_MTRRfix4K_C0000;
212		break;
213	default:
214		return false;
215	}
216
217	return true;
218}
219
220static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
221{
222	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
223	u64 unit_size = fixed_mtrr_seg_unit_size(seg);
224
225	*start = mtrr_seg->start + unit * unit_size;
226	*end = *start + unit_size;
227	WARN_ON(*end > mtrr_seg->end);
228}
229
230static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
231{
232	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
233
234	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
235		> mtrr_seg->end);
236
237	/* each unit has 8 ranges. */
238	return mtrr_seg->range_start + 8 * unit;
239}
240
241static int fixed_mtrr_seg_end_range_index(int seg)
242{
243	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
244	int n;
245
246	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
247	return mtrr_seg->range_start + n - 1;
248}
249
250static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
251{
252	int seg, unit;
253
254	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
255		return false;
256
257	fixed_mtrr_seg_unit_range(seg, unit, start, end);
258	return true;
259}
260
261static int fixed_msr_to_range_index(u32 msr)
262{
263	int seg, unit;
264
265	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
266		return -1;
267
268	return fixed_mtrr_seg_unit_range_index(seg, unit);
269}
270
271static int fixed_mtrr_addr_to_seg(u64 addr)
272{
273	struct fixed_mtrr_segment *mtrr_seg;
274	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);
275
276	for (seg = 0; seg < seg_num; seg++) {
277		mtrr_seg = &fixed_seg_table[seg];
278		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
279			return seg;
280	}
281
282	return -1;
283}
284
285static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
286{
287	struct fixed_mtrr_segment *mtrr_seg;
288	int index;
289
290	mtrr_seg = &fixed_seg_table[seg];
291	index = mtrr_seg->range_start;
292	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
293	return index;
294}
295
296static u64 fixed_mtrr_range_end_addr(int seg, int index)
297{
298	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
299	int pos = index - mtrr_seg->range_start;
300
301	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
302}
303
304static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
305{
306	u64 mask;
307
308	*start = range->base & PAGE_MASK;
309
310	mask = range->mask & PAGE_MASK;
311
312	/* This cannot overflow because writing to the reserved bits of
313	 * variable MTRRs causes a #GP.
314	 */
315	*end = (*start | ~mask) + 1;
316}
317
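A worked example of var_mtrr_range(), illustrative only: assume a 36-bit guest MAXPHYADDR and a variable MTRR mapping 256MB of write-back memory at 3GB, i.e. the guest writes PHYSBASE = 0xC0000006 and PHYSMASK = 0xFF0000800. set_var_mtrr_msr() below stores the mask as 0xFF0000800 | (-1LL << 36), so the arithmetic here cannot wrap and yields [0xC0000000, 0xD0000000).

static void sketch_var_mtrr_range(void)
{
	unsigned long long base = 0xC0000006ULL;		/* base 3GB, type WB */
	unsigned long long mask = 0xFF0000800ULL | (~0ULL << 36); /* as stored by KVM */
	unsigned long long page_mask = ~0xFFFULL;

	unsigned long long start = base & page_mask;		     /* 0xC0000000 */
	unsigned long long end = (start | ~(mask & page_mask)) + 1; /* 0xD0000000 */

	(void)end;
}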
318static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
319{
320	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
321	gfn_t start, end;
322	int index;
323
324	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
325	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
326		return;
327
328	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
329		return;
330
331	/* fixed MTRRs. */
332	if (fixed_msr_to_range(msr, &start, &end)) {
333		if (!fixed_mtrr_is_enabled(mtrr_state))
334			return;
335	} else if (msr == MSR_MTRRdefType) {
336		start = 0x0;
337		end = ~0ULL;
338	} else {
339		/* variable range MTRRs. */
340		index = (msr - 0x200) / 2;
341		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
342	}
343
344	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
345}
346
347static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
348{
349	return (range->mask & (1 << 11)) != 0;
350}
351
352static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
353{
354	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
355	struct kvm_mtrr_range *tmp, *cur;
356	int index, is_mtrr_mask;
357
358	index = (msr - 0x200) / 2;
359	is_mtrr_mask = msr - 0x200 - 2 * index;
360	cur = &mtrr_state->var_ranges[index];
361
362	/* remove the entry if it's in the list. */
363	if (var_mtrr_range_is_valid(cur))
364		list_del(&mtrr_state->var_ranges[index].node);
365
366	/* Extend the mask with all 1 bits to the left, since those
367	 * bits are reserved (implicitly 0) from the guest's point of
368	 * view.  They are cleared again when the MSR is read back.
369	 */
370	if (!is_mtrr_mask)
371		cur->base = data;
372	else
373		cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));
374
375	/* add it to the list if it's enabled. */
376	if (var_mtrr_range_is_valid(cur)) {
377		list_for_each_entry(tmp, &mtrr_state->head, node)
378			if (cur->base >= tmp->base)
379				break;
380		list_add_tail(&cur->node, &tmp->node);
381	}
382}
383
384int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
385{
386	int index;
387
388	if (!kvm_mtrr_valid(vcpu, msr, data))
389		return 1;
390
391	index = fixed_msr_to_range_index(msr);
392	if (index >= 0)
393		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
394	else if (msr == MSR_MTRRdefType)
395		vcpu->arch.mtrr_state.deftype = data;
396	else if (msr == MSR_IA32_CR_PAT)
397		vcpu->arch.pat = data;
398	else
399		set_var_mtrr_msr(vcpu, msr, data);
400
401	update_mtrr(vcpu, msr);
402	return 0;
403}
404
405int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
406{
407	int index;
408
409	/* MSR_MTRRcap is a readonly MSR. */
410	if (msr == MSR_MTRRcap) {
411		/*
412		 * SMRR = 0
413		 * WC = 1
414		 * FIX = 1
415		 * VCNT = KVM_NR_VAR_MTRR
416		 */
417		*pdata = 0x500 | KVM_NR_VAR_MTRR;
418		return 0;
419	}
420
421	if (!msr_mtrr_valid(msr))
422		return 1;
423
424	index = fixed_msr_to_range_index(msr);
425	if (index >= 0)
426		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
427	else if (msr == MSR_MTRRdefType)
428		*pdata = vcpu->arch.mtrr_state.deftype;
429	else if (msr == MSR_IA32_CR_PAT)
430		*pdata = vcpu->arch.pat;
431	else {	/* Variable MTRRs */
432		int is_mtrr_mask;
433
434		index = (msr - 0x200) / 2;
435		is_mtrr_mask = msr - 0x200 - 2 * index;
436		if (!is_mtrr_mask)
437			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
438		else
439			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
440
441		*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
442	}
443
444	return 0;
445}
446
447void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
448{
449	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
450}
451
452struct mtrr_iter {
453	/* input fields. */
454	struct kvm_mtrr *mtrr_state;
455	u64 start;
456	u64 end;
457
458	/* output fields. */
459	int mem_type;
460	/* mtrr is completely disabled? */
461	bool mtrr_disabled;
462	/* [start, end) is not fully covered in MTRRs? */
463	bool partial_map;
464
465	/* private fields. */
466	union {
467		/* used for fixed MTRRs. */
468		struct {
469			int index;
470			int seg;
471		};
472
473		/* used for var MTRRs. */
474		struct {
475			struct kvm_mtrr_range *range;
476			/* the max address that has been covered by var MTRRs. */
477			u64 start_max;
478		};
479	};
480
481	bool fixed;
482};
483
484static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
485{
486	int seg, index;
487
488	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
489		return false;
490
491	seg = fixed_mtrr_addr_to_seg(iter->start);
492	if (seg < 0)
493		return false;
494
495	iter->fixed = true;
496	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
497	iter->index = index;
498	iter->seg = seg;
499	return true;
500}
501
502static bool match_var_range(struct mtrr_iter *iter,
503			    struct kvm_mtrr_range *range)
504{
505	u64 start, end;
506
507	var_mtrr_range(range, &start, &end);
508	if (!(start >= iter->end || end <= iter->start)) {
509		iter->range = range;
510
511		/*
512		 * This function is called while walking kvm_mtrr.head.
513		 * @range is the entry with the lowest base address that overlaps
514		 * [iter->start_max, iter->end).
515		 */
516		iter->partial_map |= iter->start_max < start;
517
518		/* update the max address that has been covered. */
519		iter->start_max = max(iter->start_max, end);
520		return true;
521	}
522
523	return false;
524}
525
526static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
527{
528	struct kvm_mtrr *mtrr_state = iter->mtrr_state;
529
530	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
531		if (match_var_range(iter, iter->range))
532			return;
533
534	iter->range = NULL;
535	iter->partial_map |= iter->start_max < iter->end;
536}
537
538static void mtrr_lookup_var_start(struct mtrr_iter *iter)
539{
540	struct kvm_mtrr *mtrr_state = iter->mtrr_state;
541
542	iter->fixed = false;
543	iter->start_max = iter->start;
544	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
545
546	__mtrr_lookup_var_next(iter);
547}
548
549static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
550{
551	/* terminate the lookup. */
552	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
553		iter->fixed = false;
554		iter->range = NULL;
555		return;
556	}
557
558	iter->index++;
559
560	/* all fixed MTRRs have been looked up. */
561	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
562		return mtrr_lookup_var_start(iter);
563
564	/* switch to next segment. */
565	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
566		iter->seg++;
567}
568
569static void mtrr_lookup_var_next(struct mtrr_iter *iter)
570{
571	__mtrr_lookup_var_next(iter);
572}
573
574static void mtrr_lookup_start(struct mtrr_iter *iter)
575{
576	if (!mtrr_is_enabled(iter->mtrr_state)) {
577		iter->mtrr_disabled = true;
578		return;
579	}
580
581	if (!mtrr_lookup_fixed_start(iter))
582		mtrr_lookup_var_start(iter);
583}
584
585static void mtrr_lookup_init(struct mtrr_iter *iter,
586			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
587{
588	iter->mtrr_state = mtrr_state;
589	iter->start = start;
590	iter->end = end;
591	iter->mtrr_disabled = false;
592	iter->partial_map = false;
593	iter->fixed = false;
594	iter->range = NULL;
595
596	mtrr_lookup_start(iter);
597}
598
599static bool mtrr_lookup_okay(struct mtrr_iter *iter)
600{
601	if (iter->fixed) {
602		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
603		return true;
604	}
605
606	if (iter->range) {
607		iter->mem_type = iter->range->base & 0xff;
608		return true;
609	}
610
611	return false;
612}
613
614static void mtrr_lookup_next(struct mtrr_iter *iter)
615{
616	if (iter->fixed)
617		mtrr_lookup_fixed_next(iter);
618	else
619		mtrr_lookup_var_next(iter);
620}
621
622#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
623	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
624	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))
625
626u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
627{
628	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
629	struct mtrr_iter iter;
630	u64 start, end;
631	int type = -1;
632	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
633			       | (1 << MTRR_TYPE_WRTHROUGH);
634
635	start = gfn_to_gpa(gfn);
636	end = start + PAGE_SIZE;
637
638	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
639		int curr_type = iter.mem_type;
640
641		/*
642		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
643		 * Precedences.
644		 */
645
646		if (type == -1) {
647			type = curr_type;
648			continue;
649		}
650
651		/*
652		 * If two or more variable memory ranges match and the
653		 * memory types are identical, then that memory type is
654		 * used.
655		 */
656		if (type == curr_type)
657			continue;
658
659		/*
660		 * If two or more variable memory ranges match and one of
661		 * the memory types is UC, the UC memory type is used.
662		 */
663		if (curr_type == MTRR_TYPE_UNCACHABLE)
664			return MTRR_TYPE_UNCACHABLE;
665
666		/*
667		 * If two or more variable memory ranges match and the
668		 * memory types are WT and WB, the WT memory type is used.
669		 */
670		if (((1 << type) & wt_wb_mask) &&
671		      ((1 << curr_type) & wt_wb_mask)) {
672			type = MTRR_TYPE_WRTHROUGH;
673			continue;
674		}
675
676		/*
677		 * For overlaps not defined by the above rules, processor
678		 * behavior is undefined.
679		 */
680
681		/* We use WB for this undefined behavior. :( */
682		return MTRR_TYPE_WRBACK;
683	}
684
685	if (iter.mtrr_disabled)
686		return mtrr_disabled_type(vcpu);
687
688	/* not contained in any MTRRs. */
689	if (type == -1)
690		return mtrr_default_type(mtrr_state);
691
692	/*
693	 * We only checked a single page, so it is impossible for it to
694	 * be partially covered by MTRRs.
695	 */
696	WARN_ON(iter.partial_map);
697
698	return type;
699}
700EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
701
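The precedence rules applied in the loop above can be summarised as a small stand-alone combiner; this is an illustrative sketch, not KVM code, relying only on the MTRR_TYPE_* constants from <asm/mtrr.h> that this file already includes:

static int sketch_combine_mtrr_types(int a, int b)
{
	const int wt_wb = (1 << MTRR_TYPE_WRBACK) | (1 << MTRR_TYPE_WRTHROUGH);

	if (a == b)
		return a;				/* identical types pass through */
	if (a == MTRR_TYPE_UNCACHABLE || b == MTRR_TYPE_UNCACHABLE)
		return MTRR_TYPE_UNCACHABLE;		/* UC always wins */
	if (((1 << a) & wt_wb) && ((1 << b) & wt_wb))
		return MTRR_TYPE_WRTHROUGH;		/* WT beats WB */
	return MTRR_TYPE_WRBACK;	/* undefined overlap; this file uses WB */
}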
702bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
703					  int page_num)
704{
705	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
706	struct mtrr_iter iter;
707	u64 start, end;
708	int type = -1;
709
710	start = gfn_to_gpa(gfn);
711	end = gfn_to_gpa(gfn + page_num);
712	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
713		if (type == -1) {
714			type = iter.mem_type;
715			continue;
716		}
717
718		if (type != iter.mem_type)
719			return false;
720	}
721
722	if (iter.mtrr_disabled)
723		return true;
724
725	if (!iter.partial_map)
726		return true;
727
728	if (type == -1)
729		return true;
730
731	return type == mtrr_default_type(mtrr_state);
732}
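Finally, a hedged usage sketch of kvm_mtrr_check_gfn_range_consistency(): a caller that wants to map a 2MB guest page with a single memory type could first verify that the guest MTRRs agree over the whole range. The wrapper name and the hard-coded page count are illustrative and not part of KVM.

static bool sketch_can_map_2mb_with_one_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	/* 512 contiguous 4KB pages == one 2MB mapping */
	return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, 512);
}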