v4.17
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)

static inline u64 rsvd_bits(int s, int e)
{
	if (e < s)
		return 0;

	return ((1ULL << (e - s + 1)) - 1) << s;
}
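/*
 * Editor's worked example (illustrative, not part of the original header):
 * rsvd_bits(s, e) builds a contiguous mask covering bits e:s inclusive.  For
 * instance, rsvd_bits(52, 62) == ((1ULL << 11) - 1) << 52 ==
 * 0x7ff0000000000000, the sort of mask used to flag reserved physical-address
 * bits above a 52-bit MAXPHYADDR in guest PTEs.
 */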
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}
/*
 * Currently there are two sorts of write-protection: a) the first one
 * write-protects guest pages to synchronize guest modifications, b) the other
 * is used to synchronize the dirty bitmap for KVM_GET_DIRTY_LOG.  The
 * differences between the two are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately to avoid corrupting
 *    the shadow page tables across vcpus, so it must be done under the
 *    protection of mmu-lock.  The second case does not need to flush the TLB
 *    until the dirty bitmap is returned to userspace, since it only
 *    write-protects pages logged in the bitmap; no page in the dirty bitmap
 *    is missed, so the TLB flush can happen outside of mmu-lock.
 *
 * Hence the problem: the first case can observe stale TLB entries created by
 * the second case, which write-protects pages without flushing the TLB
 * immediately.  To make the first case aware of this, it flushes the TLB
 * whenever it write-protects an spte whose SPTE_MMU_WRITEABLE bit is set;
 * this works because the second case never touches SPTE_MMU_WRITEABLE.
 *
 * In any case, whenever an spte is updated (only permission and status bits
 * are changed) we need to check whether an spte with SPTE_MMU_WRITEABLE has
 * become read-only; if so, the TLB must be flushed.  Fortunately,
 * mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to check whether a writable TLB entry may exist, or whether the spte can
 *   be made writable on the mmu mapping, check SPTE_MMU_WRITEABLE; this is
 *   the common case.  Otherwise,
 * - when fixing a page fault on the spte or write-protecting for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}
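/*
 * Editor's sketch of the rules above (illustrative; SPTE_MMU_WRITEABLE is
 * defined in mmu.c, not in this header).  A writer deciding whether a remote
 * TLB flush is needed when making an spte read-only would check the software
 * bit, roughly:
 *
 *	if (spte & SPTE_MMU_WRITEABLE)
 *		kvm_flush_remote_tlbs(kvm);
 *
 * whereas dirty logging, which only clears PT_WRITABLE_MASK, tests hardware
 * writability with is_writable_pte(spte) and defers the flush until the dirty
 * bitmap is handed back to userspace.
 */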
static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC. We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits; there are 16 domains and 2
		 * attribute bits per domain in pkru.  pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for the domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
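/*
 * Editor's worked example (illustrative): for a user-mode write,
 * pfec = PFERR_USER_MASK | PFERR_WRITE_MASK = 0x6, so index = 0x6 >> 1 = 3
 * and the fault decision is bit @pte_access of mmu->permissions[3].  When
 * SMAP is overridden (cpl < 3 and EFLAGS.AC set), smap == X86_EFLAGS_AC and
 * smap >> (18 - 3 + 1) == 4, i.e. PFERR_RSVD_MASK >> 1, so the index selects
 * the permission set computed with SMAP checks disabled.
 */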
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
#endif
v6.8
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

extern bool __read_mostly enable_mmio_caching;

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
			       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)

#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
#define KVM_MMU_EFER_ROLE_BITS (EFER_LME | EFER_NX)

static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}
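/*
 * Editor's note (illustrative): (2ULL << (e - s)) - 1 produces the same mask
 * as the older ((1ULL << (e - s + 1)) - 1) form, but the shift count never
 * reaches 64, so rsvd_bits(0, 63) yields ~0ULL instead of relying on an
 * undefined 64-bit shift.
 */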
/*
 * The number of non-reserved physical address bits irrespective of features
 * that repurpose legal bits, e.g. MKTME.
 */
extern u8 __read_mostly shadow_phys_bits;

static inline gfn_t kvm_mmu_max_gfn(void)
{
	/*
	 * Note that this uses the host MAXPHYADDR, not the guest's.
	 * EPT/NPT cannot support GPAs that would exceed host.MAXPHYADDR;
	 * assuming KVM is running on bare metal, guest accesses beyond
	 * host.MAXPHYADDR will hit a #PF(RSVD) and never cause a vmexit
	 * (either EPT Violation/Misconfig or #NPF), and so KVM will never
	 * install a SPTE for such addresses.  If KVM is running as a VM
	 * itself, on the other hand, it might see a MAXPHYADDR that is less
	 * than hardware's real MAXPHYADDR.  Using the host MAXPHYADDR
	 * disallows such SPTEs entirely and simplifies the TDP MMU.
	 */
	int max_gpa_bits = likely(tdp_enabled) ? shadow_phys_bits : 52;

	return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
}
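/*
 * Editor's worked example (illustrative): with TDP enabled and
 * shadow_phys_bits == 46, kvm_mmu_max_gfn() returns
 * (1ULL << (46 - 12)) - 1 == 0x3ffffffff, i.e. the gfn of the last 4KiB page
 * below the 64TiB host MAXPHYADDR limit.
 */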
static inline u8 kvm_get_shadow_phys_bits(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
	 * in CPU detection code, but the processor treats those reduced bits as
	 * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at
	 * the physical address bits reported by CPUID.
	 */
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
	 * custom CPUID.  Proceed with whatever the kernel found since these features
	 * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);

void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     int huge_page_level, bool accessed_dirty,
			     gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len);
void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
					struct kvm_mmu *mmu);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			 int bytes);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}
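/*
 * Editor's worked example (illustrative): X86_CR3_PCID_MASK covers bits 11:0,
 * so with CR4.PCIDE = 1 and CR3 = 0x000000012345d001, kvm_get_pcid() returns
 * PCID 0x001; with CR4.PCIDE = 0 it always returns 0, since the low CR3 bits
 * then encode PWT/PCD rather than a PCID.
 */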
static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu)
{
	if (!guest_can_use(vcpu, X86_FEATURE_LAM))
		return 0;

	return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
}

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root.hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
					  vcpu->arch.mmu->root_role.level);
}

static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
						    struct kvm_mmu *mmu)
{
	/*
	 * When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e.
	 * @mmu's snapshot of CR0.WP and thus all related paging metadata may
	 * be stale.  Refresh CR0.WP and the metadata on-demand when checking
	 * for permission faults.  Exempt nested MMUs, i.e. MMUs for shadowing
	 * nEPT and nNPT, as CR0.WP is ignored in both cases.  Note, KVM does
	 * need to refresh nested_mmu, a.k.a. the walker used to translate L2
	 * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
	 */
	if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
		return;

	__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
}
/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  u64 access)
{
	/* strip nested paging fault error codes */
	unsigned int pfec = access;
	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);

	/*
	 * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
	 * For implicit supervisor accesses, SMAP cannot be overridden.
	 *
	 * SMAP applies only to supervisor accesses; for user accesses,
	 * not_smap may be either value, as it has no bearing on the result.
	 *
	 * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit;
	 * this bit will always be zero in pfec, but it will be one in index
	 * if SMAP checks are being disabled.
	 */
	u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
	bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
	int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
	u32 errcode = PFERR_PRESENT_MASK;
	bool fault;

	kvm_mmu_refresh_passthrough_bits(vcpu, mmu);

	fault = (mmu->permissions[index] >> pte_access) & 1;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits; there are 16 domains and 2
		 * attribute bits per domain in pkru.  pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for the domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
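/*
 * Editor's worked example (illustrative): PFERR_IMPLICIT_ACCESS is a
 * KVM-internal flag well above bit 18, so for an explicit supervisor access
 * with EFLAGS.AC = 1 the expression ((rflags & X86_EFLAGS_AC) | 0) equals
 * X86_EFLAGS_AC and not_smap is true, folding PFERR_RSVD into the
 * permission-table index.  For an implicit supervisor access the OR always
 * contains the implicit-access bit, can never equal X86_EFLAGS_AC, and SMAP
 * is therefore enforced regardless of EFLAGS.AC.
 */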
bool __kvm_mmu_honors_guest_mtrrs(bool vm_has_noncoherent_dma);

static inline bool kvm_mmu_honors_guest_mtrrs(struct kvm *kvm)
{
	return __kvm_mmu_honors_guest_mtrrs(kvm_arch_has_noncoherent_dma(kvm));
}

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
{
	/*
	 * Read shadow_root_allocated before related pointers. Hence, threads
	 * reading shadow_root_allocated in any lock context are guaranteed to
	 * see the pointers. Pairs with smp_store_release in
	 * mmu_first_shadow_root_alloc.
	 */
	return smp_load_acquire(&kvm->arch.shadow_root_allocated);
}

#ifdef CONFIG_X86_64
extern bool tdp_mmu_enabled;
#else
#define tdp_mmu_enabled false
#endif

static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	return !tdp_mmu_enabled || kvm_shadow_root_allocated(kvm);
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline unsigned long
__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages,
		      int level)
{
	return gfn_to_index(slot->base_gfn + npages - 1,
			    slot->base_gfn, level) + 1;
}
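/*
 * Editor's worked example (illustrative): a memslot with base_gfn = 0x800 and
 * npages = 0x1000 covers gfns 0x800..0x17ff.  At PG_LEVEL_2M,
 * KVM_HPAGE_GFN_SHIFT() is 9, so __kvm_mmu_slot_lpages() returns
 * (0x17ff >> 9) - (0x800 >> 9) + 1 = 0xb - 0x4 + 1 = 8 huge-page-aligned
 * chunks needing tracking metadata.
 */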
static inline unsigned long
kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
{
	return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}

static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
	atomic64_add(count, &kvm->stat.pages[level - 1]);
}

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
			   struct x86_exception *exception);

static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu,
				      gpa_t gpa, u64 access,
				      struct x86_exception *exception)
{
	if (mmu != &vcpu->arch.nested_mmu)
		return gpa;
	return translate_nested_gpa(vcpu, gpa, access, exception);
}
#endif