/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>

/*
 * As we only have the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
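/*
 * Worked example (illustrative only, assuming a 4KB-page kernel with
 * VA_BITS == 39, i.e. PAGE_OFFSET == 0xffffffc000000000):
 *
 *	HYP_PAGE_OFFSET_MASK = (1UL << 39) - 1    = 0x0000007fffffffff
 *	HYP_PAGE_OFFSET      = 0xffffffc000000000 & 0x0000007fffffffff
 *	                     = 0x0000004000000000
 *
 * so the HYP alias of the linear map starts 256GB into the TTBR0_EL2
 * address space.
 */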

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
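/*
 * Continuing the VA_BITS == 39, 4KB-page example above, TRAMPOLINE_VA
 * evaluates to 0x0000007ffffff000: the very last page of the HYP VA
 * range.
 */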

#ifdef __ASSEMBLY__

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
.endm

#else

#include <asm/cachetype.h>
#include <asm/cacheflush.h>

#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
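/*
 * Example (hypothetical address, same VA_BITS == 39 layout as above):
 *
 *	KERN_TO_HYP(0xffffffc000100000)
 *		= 0xffffffc000100000 - 0xffffffc000000000 + 0x0000004000000000
 *		= 0x0000004000100000
 *
 * which is exactly what the kern_hyp_va assembly macro produces by
 * masking off the top bits.
 */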

/*
 * Align KVM with the kernel's view of physical memory. Should be
 * 40-bit IPA, with the PGD being 8KB aligned in the 4KB page
 * configuration.
 */
#define KVM_PHYS_SHIFT	PHYS_MASK_SHIFT
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

/* Make sure we get the right size, and thus the right alignment */
#define PTRS_PER_S2_PGD	(1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
#define S2_PGD_ORDER	get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
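/*
 * Worked example for the 4KB-page, 3-level case the comment above
 * refers to (KVM_PHYS_SHIFT == 40, PGDIR_SHIFT == 30):
 *
 *	PTRS_PER_S2_PGD = 1 << (40 - 30)      = 1024 entries
 *	1024 * sizeof(pgd_t)                  = 8KB
 *	S2_PGD_ORDER    = get_order(8KB)      = 1, i.e. two pages
 *
 * which yields the 8KB-aligned stage-2 PGD mentioned above.
 */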

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)	set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)	set_pmd(pmdp, pmd)

static inline bool kvm_is_write_fault(unsigned long esr)
{
	unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;

	/* Instruction aborts are never writes */
	if (esr_ec == ESR_EL2_EC_IABT)
		return false;

	/*
	 * If the instruction syndrome is valid (ISV set) and WnR is
	 * clear, the access was a read; otherwise assume a write.
	 */
	if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
		return false;

	return true;
}

static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}

#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both the MMU (SCTLR_EL1.M, bit 0) and the D-cache (SCTLR_EL1.C, bit 2) must be on */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
					     unsigned long size)
{
	if (!vcpu_has_cache_enabled(vcpu))
		kvm_flush_dcache_to_poc((void *)hva, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range(hva, hva + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

#define kvm_virt_to_phys(x)	__virt_to_phys((unsigned long)(x))

void stage2_flush_vm(struct kvm *kvm);

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __virt_to_phys(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0	//idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4KB pages, a 39-bit VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings: all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */

#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
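/*
 * For example, with VA_BITS == 48:
 *
 *	HYP_PAGE_OFFSET_HIGH_MASK = 0x0000ffffffffffff
 *	HYP_PAGE_OFFSET_LOW_MASK  = 0x00007fffffffffff
 *
 * The low mask additionally clears bit 47, steering HYP VAs into the
 * bottom half of the 48-bit range; since it is a strict subset of the
 * high mask, applying both (as the patched code below may do) is
 * equivalent to applying the low mask alone.
 */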

#ifdef __ASSEMBLY__

#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * This generates the following sequences:
 * - High mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		nop
 * - Low mask:
 *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
 *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
 * - VHE:
 *		nop
 *		nop
 *
 * The "low mask" version works because the low mask is a strict
 * subset of the high mask, so the first AND is redundant but
 * harmless. It should be completely invisible on any viable CPU.
 */
.macro kern_hyp_va	reg
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	and	\reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
alternative_else_nop_endif
alternative_if ARM64_HYP_OFFSET_LOW
	and	\reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
alternative_else_nop_endif
.endm

#else

#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
	asm volatile(ALTERNATIVE("nop",
				 "and %0, %0, %1",
				 ARM64_HYP_OFFSET_LOW)
		     : "+r" (v)
		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
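/*
 * Typical (hypothetical) use: any kernel pointer that will be
 * dereferenced at EL2 on a non-VHE system must be converted first,
 * e.g.:
 *
 *	struct kvm_vcpu *hyp_vcpu = kern_hyp_va(vcpu);
 *
 * On a VHE system both alternatives patch down to NOPs, making
 * kern_hyp_va() an identity transform.
 */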

/*
 * We currently only support a 40-bit IPA, i.e. a 1TB guest physical
 * address space.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_set_pte(ptep, pte)	set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd)	set_pmd(pmdp, pmd)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pteval_t pteval;
	unsigned long tmp;

	asm volatile("//	kvm_set_s2pte_readonly\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"
	"	and	%0, %0, %3		// clear PTE_S2_RDWR\n"
	"	orr	%0, %0, %4		// set PTE_S2_RDONLY\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
	: "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
}
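/*
 * The load-exclusive/store-exclusive loop above makes the permission
 * downgrade atomic: if another CPU modifies the same stage-2 entry
 * between the ldxr and the stxr, the stxr fails and the update is
 * retried, so no concurrent change to the PTE is ever lost.
 */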

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	kvm_set_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return kvm_s2pte_readonly((pte_t *)pmd);
}

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	/*
	 * Each entry installed in a page-table page takes a reference
	 * on that page, so a count of 1 (the bare allocation) means
	 * the table is empty and may be freed.
	 */
	return page_count(ptr_page) == 1;
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif
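/*
 * When a level is folded away by the pgtable configuration there is
 * no separate table page at that level to free, so the folded
 * variants report "not empty" unconditionally.
 */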

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both the MMU (SCTLR_EL1.M, bit 0) and the D-cache (SCTLR_EL1.C, bit 2) must be on */
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);
	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	struct page *page = pud_page(pud);
	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}

#define kvm_virt_to_phys(x)	__virt_to_phys((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap();
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}
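/*
 * Illustrative (hypothetical) layout with VA_BITS == 39: if the
 * idmap'd HYP init code lives at PA 0x8000000000 (512GB), then
 * idmap_idx = 0x8000000000 >> 39 = 1, so the boot table is hooked
 * into entry 1 of the extended PGD while entry 0 covers the runtime
 * HYP mappings.
 */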

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	/* ID_AA64MMFR1_EL1.VMIDBits == 0b0010 advertises 16-bit VMIDs (ARMv8.1) */
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */