v6.2 (arch/arm64/include/asm/kvm_mmu.h)

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  // idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings: all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */
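
/*
 * A worked example (illustrative): with VA_BITS = 48 and
 * __pa_symbol(__hyp_idmap_text_start) = 0x800000000000, bit
 * (VA_BITS - 1) = bit 47 is set, so:
 *
 *	HYP_VA_MIN = 0
 *	HYP_VA_MAX = (1 << 47) - 1 = 0x7fffffffffff
 *
 * i.e. the idmap sits in the upper half and the HYP mappings take the
 * bottom half of the 48-bit space.
 */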

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm
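
/*
 * Illustrative only: with VHE, kvm_update_va_mask turns all five
 * instructions into NOPs. Without VHE and with a zero tag (no EL2
 * layout randomization), only the mask is needed and the patched
 * sequence looks roughly like:
 *
 *	and	x0, x0, #0x7fffffffffff	// va_mask for a 48-bit VA kernel
 *	nop
 *	nop
 *	nop
 *	nop
 */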

/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm

/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm
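
/*
 * Net effect of hyp_kimg_va as a single expression:
 *
 *	kimg VA = (hyp VA + hyp_physvirt_offset) + kimage_voffset
 *
 * i.e. hyp VA -> PA via hyp_physvirt_offset, then PA -> kernel image
 * VA via kimage_voffset.
 */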

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/kvm_host.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    ARM64_ALWAYS_SYSTEM,
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
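
/*
 * A hypothetical usage sketch: translating a kernel pointer before it
 * is handed to EL2 under nVHE (the per-CPU variable here is only an
 * example, not implied by this header):
 *
 *	struct kvm_host_data *kern_ptr = this_cpu_ptr(&kvm_host_data);
 *	struct kvm_host_data *hyp_ptr  = kern_hyp_va(kern_ptr);
 */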

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
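
/*
 * For example, a VM using the default 40-bit IPA space has:
 *
 *	kvm_phys_shift(kvm) == 40
 *	kvm_phys_size(kvm)  == 1ULL << 40	(1 TiB)
 *	kvm_phys_mask(kvm)  == 0xffffffffff
 */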

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int kvm_share_hyp(void *from, void *to);
void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int __create_hyp_mappings(unsigned long start, unsigned long size,
			  unsigned long phys, enum kvm_pgtable_prot prot);
int hyp_alloc_private_va_range(size_t size, unsigned long *haddr);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(u32 *hyp_va_bits);

static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	return base + (idx * SZ_2K);
}
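
/*
 * The idx computation maps slots 0, 1, 2, 3 to offsets 0, 0, 1, 2
 * (in SZ_2K units). HYP_VECTOR_DIRECT and HYP_VECTOR_SPECTRE_DIRECT
 * can share offset 0 because callers pass a different 'base' for the
 * Spectre-hardened vector page.
 */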

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	\
	dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
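
/*
 * The 0b101 mask above checks SCTLR_EL1.M (bit 0, stage 1 MMU enable)
 * and SCTLR_EL1.C (bit 2, data cacheability): the cache is only
 * considered enabled when both are set.
 */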

static inline void __clean_dcache_guest_page(void *va, size_t size)
{
	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		icache_inval_all_pou();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
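
/*
 * Hypothetical usage sketch, e.g. fetching an 8-byte table entry from
 * guest memory while outside the kvm->srcu critical section:
 *
 *	u64 entry;
 *	int ret = kvm_read_guest_lock(kvm, gpa, &entry, sizeof(entry));
 */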

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * When this is (directly or indirectly) used on the TLB invalidation
 * path, we rely on a previously issued DSB so that page table updates
 * and VMID reads are correctly ordered.
 */
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;
	vmid_field &= VTTBR_VMID_MASK(kvm_arm_vmid_bits);
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
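
/*
 * Illustrative composition of the returned value: assuming the 48-bit
 * VMID shift and no 52-bit PA rearrangement in phys_to_ttbr(),
 *
 *	VTTBR = pgd_phys | (VMID << VTTBR_VMID_SHIFT) | VTTBR_CNP_BIT
 *
 * e.g. pgd_phys = 0x812340000, VMID = 5, CnP supported:
 *	0x0005000812340001
 */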

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(arch->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
	return container_of(mmu->arch, struct kvm, arch);
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */

v4.10.11 (arch/arm64/include/asm/kvm_mmu.h)

/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __virt_to_phys(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  // idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, 39-bit VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings: all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */

#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
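
/*
 * With VA_BITS = 48, these evaluate to:
 *
 *	HYP_PAGE_OFFSET_HIGH_MASK = 0x0000ffffffffffff
 *	HYP_PAGE_OFFSET_LOW_MASK  = 0x00007fffffffffff
 */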

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * This generates the following sequences:
 * - High mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		nop
 * - Low mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
 * - VHE:
 *		nop
 *		nop
 *
 * The "low mask" version works because the mask is a strict subset of
 * the "high mask", hence performing the first mask for nothing.
 * Should be completely invisible on any viable CPU.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
alternative_else_nop_endif
alternative_if ARM64_HYP_OFFSET_LOW
	and	\reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
alternative_else_nop_endif
.endm

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
	asm volatile(ALTERNATIVE("nop",
				 "and %0, %0, %1",
				 ARM64_HYP_OFFSET_LOW)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))

/*
 * We currently only support a 40-bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	kvm_set_s2pte_readonly\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_S2_RDWR\n"
	"	orr	%0, %0, %4		// set PTE_S2_RDONLY\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
	: "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
}
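
/*
 * The ldxr/stxr loop makes the permission downgrade atomic with
 * respect to concurrent updates of the PTE: if the exclusive monitor
 * is lost between the load and the store, the store fails and the
 * sequence retries.
 */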

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}
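
/*
 * This relies on the table code taking a page reference for each
 * entry installed in a table page: a count of 1 (the allocation
 * reference alone) therefore means the table no longer has entries
 * and can be freed.
 */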

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}
272
273#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))
274
275void kvm_set_way_flush(struct kvm_vcpu *vcpu);
276void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
277
278static inline bool __kvm_cpu_uses_extended_idmap(void)
 
 
279{
280	return __cpu_uses_extended_idmap();
 
 
 
 
 
 
 
281}
282
283static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
284				       pgd_t *hyp_pgd,
285				       pgd_t *merged_hyp_pgd,
286				       unsigned long hyp_idmap_start)
 
 
287{
288	int idmap_idx;
289
290	/*
291	 * Use the first entry to access the HYP mappings. It is
292	 * guaranteed to be free, otherwise we wouldn't use an
293	 * extended idmap.
294	 */
295	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
296	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);
297
298	/*
299	 * Create another extended level entry that points to the boot HYP map,
300	 * which contains an ID mapping of the HYP init code. We essentially
301	 * merge the boot and runtime HYP maps by doing so, but they don't
302	 * overlap anyway, so this is fine.
303	 */
304	idmap_idx = hyp_idmap_start >> VA_BITS;
305	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
306	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
307}
308
309static inline unsigned int kvm_get_vmid_bits(void)
310{
311	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);
312
313	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
314}
315
316#endif /* __ASSEMBLY__ */
317#endif /* __ARM64_KVM_MMU_H__ */