// SPDX-License-Identifier: GPL-2.0-only
/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "x86.h"

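/*
 * Map an MTRR MSR index onto its backing u64 in vcpu->arch.mtrr_state;
 * returns NULL if the MSR is not one of the MTRR MSRs emulated here.
 */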
static u64 *find_mtrr(struct kvm_vcpu *vcpu, unsigned int msr)
{
	int index;

	switch (msr) {
	case MTRRphysBase_MSR(0) ... MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1):
		index = msr - MTRRphysBase_MSR(0);
		return &vcpu->arch.mtrr_state.var[index];
	case MSR_MTRRfix64K_00000:
		return &vcpu->arch.mtrr_state.fixed_64k;
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
		index = msr - MSR_MTRRfix16K_80000;
		return &vcpu->arch.mtrr_state.fixed_16k[index];
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
		index = msr - MSR_MTRRfix4K_C0000;
		return &vcpu->arch.mtrr_state.fixed_4k[index];
	case MSR_MTRRdefType:
		return &vcpu->arch.mtrr_state.deftype;
	default:
		break;
	}
	return NULL;
}

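/*
 * The architecturally defined MTRR memory types are 0 (UC), 1 (WC),
 * 4 (WT), 5 (WP) and 6 (WB); 0x73 is the bitmap of those values.
 */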
static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}

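/*
 * Validate a value being written to an MTRR MSR: reject reserved bits
 * and undefined memory types for the MSR in question.
 */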
static bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	if (WARN_ON_ONCE(!(msr >= MTRRphysBase_MSR(0) &&
			   msr <= MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1))))
		return false;

	mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else {
		/* MTRR mask */
		mask |= 0x7ff;
	}

	return (data & mask) == 0;
}

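/*
 * Emulate a write to an MTRR MSR.  Returns 0 on success, 1 if the MSR
 * is unknown or the value sets reserved bits.
 */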
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *mtrr;

	mtrr = find_mtrr(vcpu, msr);
	if (!mtrr)
		return 1;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	*mtrr = data;
	return 0;
}

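/*
 * Emulate a read of an MTRR MSR, including the read-only MSR_MTRRcap.
 * Returns 0 on success, 1 if the MSR is not an MTRR MSR.
 */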
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *mtrr;

	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR = 0
		 * WC = 1
		 * FIX = 1
		 * VCNT = KVM_NR_VAR_MTRR
		 */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

	mtrr = find_mtrr(vcpu, msr);
	if (!mtrr)
		return 1;

	*pdata = *mtrr;
	return 0;
}