/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/insn.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT	31
#define ARM_EXCEPTION_CODE(x)		((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)	(ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)		!!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ		0
#define ARM_EXCEPTION_EL1_SERROR	1
#define ARM_EXCEPTION_TRAP		2
#define ARM_EXCEPTION_IL		3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE		HVC_STUB_ERR

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0

#ifndef __ASSEMBLY__

#include <linux/mm.h>

enum __kvm_host_smccc_func {
	/* Hypercalls available only prior to pKVM finalisation */
	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
	__KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
	__KVM_HOST_SMCCC_FUNC___pkvm_init,
	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,

	/* Hypercalls available after pKVM finalisation */
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
};

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name but one defined in
 * VHE and the other in nVHE hyp implementations.
 */
#define DECLARE_KVM_HYP_SYM(sym)	\
	DECLARE_KVM_VHE_SYM(sym);	\
	DECLARE_KVM_NVHE_SYM(sym)
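
/*
 * Illustrative expansion (a sketch, not part of the original header):
 * assuming kvm_nvhe_sym() applies the nVHE name prefix as defined in
 * <asm/hyp_image.h>, DECLARE_KVM_HYP_SYM(__kvm_hyp_vector) declares roughly:
 *
 *	extern char __kvm_hyp_vector[];			// VHE copy
 *	extern char __kvm_nvhe___kvm_hyp_vector[];	// nVHE copy
 */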

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute pointer to a symbol defined in nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];		\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);	\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})
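
/*
 * Illustrative use (a sketch, not part of this header): the host can locate
 * a hyp per-CPU object by CPU number before entering EL2. The kvm_init_params
 * symbol is assumed to have been declared elsewhere with
 * DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params):
 *
 *	struct kvm_nvhe_init_params *params =
 *		per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
 *	if (!params)
 *		return -ENOMEM;		// hyp percpu memory not allocated yet
 */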

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif

struct kvm_nvhe_init_params {
	unsigned long mair_el2;
	unsigned long tcr_el2;
	unsigned long tpidr_el2;
	unsigned long stack_hyp_va;
	unsigned long stack_pa;
	phys_addr_t pgd_pa;
	unsigned long hcr_el2;
	unsigned long vttbr;
	unsigned long vtcr;
};

/*
 * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
 * hyp_panic() in non-protected mode.
 *
 * @stack_base:			hyp VA of the hyp_stack base.
 * @overflow_stack_base:	hyp VA of the hyp_overflow_stack base.
 * @fp:				hyp FP where the backtrace begins.
 * @pc:				hyp PC where the backtrace begins.
 */
struct kvm_nvhe_stacktrace_info {
	unsigned long stack_base;
	unsigned long overflow_stack_base;
	unsigned long fp;
	unsigned long pc;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)					\
	({							\
		void *val = (ptr);				\
		if (!is_kernel_in_hyp_mode())			\
			val = lm_alias((ptr));			\
		val;						\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
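
/*
 * Illustrative use (a sketch, not part of this header): host code that needs
 * an address it can safely hand to the hypervisor might do something along
 * the lines of:
 *
 *	void *va = kvm_ksym_ref(__kvm_hyp_vector);
 *	// VHE: the kernel address itself; nVHE: its linear-map alias
 */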

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
					  phys_addr_t ipa,
					  int level);
extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				       phys_addr_t start, unsigned long pages);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u64 __kvm_get_mdcr_el2(void);

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"


#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
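
/*
 * Illustrative use (a sketch, not part of this header): run a stage-1
 * address translation at EL2 and treat an unexpected exception taken on the
 * AT instruction as a failure. The "s1e1r" operation and the 'far' and 'par'
 * variables are assumptions for the example:
 *
 *	if (__kvm_at("s1e1r", far))
 *		return false;		// the AT instruction itself faulted
 *	par = read_sysreg_par();	// otherwise consume the PAR_EL1 result
 */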

asmlinkage void __noreturn hyp_panic(void);
asmlinkage void __noreturn hyp_panic_bad_stack(void);
asmlinkage void kvm_unexpected_el2_exception(void);
struct kvm_cpu_context;
void handle_trap(struct kvm_cpu_context *host_ctxt);
asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
void __noreturn __pkvm_init_finalise(void);
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_get_kimage_voffset(struct alt_instr *alt,
			    __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_final_ctr_el0(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst);
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
					      u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar);

#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * Create a struct kvm_exception_table_entry output to a section that can be
 * mapped by EL2. The table is not sorted.
 *
 * The caller must ensure:
 * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
 * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm
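
/*
 * Illustrative use (a sketch, not part of this header): register a fixup for
 * an instruction that may fault unexpectedly at EL2. The labels and the load
 * are assumptions for the example:
 *
 *	1:	ldr	x0, [x2]	// access that may take an unexpected abort
 *		...
 *	2:	// fixup target: execution resumes here if 1: faults
 *	_kvm_extable	1b, 2b
 */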

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
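
/*
 * Illustrative use (a sketch, not part of this header): around a guest
 * entry/exit path, with x1 assumed to hold a pointer to the relevant
 * struct kvm_cpu_context:
 *
 *	save_callee_saved_regs x1	// stash x18-x30 in the context
 *	...				// switch to the guest and back
 *	restore_callee_saved_regs x1	// x1 must not be x18-x28
 */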

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp,	sp_el0
	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */