// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
#include <asm/stacktrace/nvhe.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"

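/*
 * Exit handlers are indexed by ESR_ELx exception class. Each handler
 * returns > 0 to resume the guest, 0 to exit to userspace, or < 0 on
 * error (see the comment above handle_exit() below).
 */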
typedef int (*exit_handle_fn)(struct kvm_vcpu *);

static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}

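/*
 * Guest HVCs are forwarded to the PSCI/SMCCC dispatcher. A negative return
 * from kvm_hvc_call_handler() means the call is not implemented: report
 * SMCCC_RET_NOT_SUPPORTED (-1) in x0 and resume the guest.
 */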
static int handle_hvc(struct kvm_vcpu *vcpu)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_hvc_call_handler(vcpu);
	if (ret < 0) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	return ret;
}

static int handle_smc(struct kvm_vcpu *vcpu)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_incr_pc(vcpu);
	return 1;
}

/*
 * Guest access to FP/ASIMD registers is routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 * @vcpu:	the vcpu pointer
 *
 * WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 * WFIT: Same as WFI, with a timed wakeup implemented as a background timer
 *
 * WF{I,E}T can immediately return if the deadline has already expired.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
	}

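	/*
	 * WFIT/WFET carry a deadline: when the ISS RV bit marks the register
	 * field as valid, Rt holds an absolute CNTVCT_EL0 value to compare
	 * against the virtual counter. A deadline already in the past means
	 * there is nothing to wait for.
	 */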
	if (esr & ESR_ELx_WFx_ISS_WFxT) {
		if (esr & ESR_ELx_WFx_ISS_RV) {
			u64 val, now;

			now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
			val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

			if (now >= val)
				goto out;
		} else {
			/* Treat WFxT as WFx if RN is invalid */
			esr &= ~ESR_ELx_WFx_ISS_WFxT;
		}
	}

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		if (esr & ESR_ELx_WFx_ISS_WFxT)
			vcpu_set_flag(vcpu, IN_WFIT);

		kvm_vcpu_wfi(vcpu);
	}
out:
	kvm_incr_pc(vcpu);

	return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception instruction
 *
 * @vcpu:	the vcpu pointer
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * @return: 0 (while setting vcpu->run->exit_reason)
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 esr = kvm_vcpu_get_esr(vcpu);

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = lower_32_bits(esr);
	run->debug.arch.hsr_high = upper_32_bits(esr);
	run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID;

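	/*
	 * Watchpoint hits are the one debug exit where userspace also needs
	 * the faulting address, so forward FAR_EL2 alongside the syndrome.
	 */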
	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
		run->debug.arch.far = vcpu->arch.fault.far_el2;
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
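		/* The pending single-step exception has now fired; drop it. */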
		vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
		break;
	}

	return 0;
}

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
		      esr, esr_get_class_string(esr));

	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest access to SVE registers should be routed to this handler only
 * when the system doesn't support SVE.
 */
static int handle_sve(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
 * a NOP). If we get here, it means we didn't fix up ptrauth on exit, and all
 * we can do is give the guest an UNDEF.
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

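/*
 * Dispatch table indexed by ESR_ELx exception class. The designated range
 * initializer makes kvm_handle_unknown_ec the fallback for every class not
 * explicitly overridden below.
 */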
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP10_ID]	= kvm_handle_cp10_id,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u8 esr_ec = ESR_ELx_EC(esr);

	return arm_exit_handlers[esr_ec];
}

/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
	int handled;

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
	if (!kvm_condition_valid(vcpu)) {
		kvm_incr_pc(vcpu);
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu);
	}

	return handled;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
	struct kvm_run *run = vcpu->run;

	if (ARM_SERROR_PENDING(exception_index)) {
		/*
		 * The SError is handled by handle_exit_early(). If the guest
		 * survives it will re-execute the original instruction.
		 */
		return 1;
	}

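	/* Mask off the SError-pending bit, leaving the bare exception code. */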
	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return. Guest state must
		 * have been corrupted somehow. Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}

/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
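	/*
	 * With the RAS extension, a guest SError has its syndrome deferred
	 * into DISR_EL1; decode it to decide whether a virtual SError needs
	 * to be injected. Without RAS, be conservative and always inject a
	 * virtual abort.
	 */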
	if (ARM_SERROR_PENDING(exception_index)) {
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}

void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
					      u64 elr_virt, u64 elr_phys,
					      u64 par, uintptr_t vcpu,
					      u64 far, u64 hpfar)
{
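	/*
	 * elr_phys locates the panicking instruction in the kernel image;
	 * subtracting the KASLR displacement and the hyp VA (elr_virt) gives
	 * the constant that translates any hyp VA into an un-randomized
	 * kimage address for symbolization.
	 */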
	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
	u64 mode = spsr & PSR_MODE_MASK;
	u64 panic_addr = elr_virt + hyp_offset;

	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
		kvm_err("Invalid host exception to nVHE hyp!\n");
	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
		const char *file = NULL;
		unsigned int line = 0;

		/* All hyp bugs, including warnings, are treated as fatal. */
		if (!is_protected_kvm_enabled() ||
		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
			struct bug_entry *bug = find_bug(elr_in_kimg);

			if (bug)
				bug_get_file_line(bug, &file, &line);
		}

		if (file)
			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
		else
			kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
				(void *)(panic_addr + kaslr_offset()));
	} else {
		kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
			(void *)(panic_addr + kaslr_offset()));
	}

	/* Dump the nVHE hypervisor backtrace */
	kvm_nvhe_dump_backtrace(hyp_offset);

	/*
	 * Hyp has panicked and we're going to handle that by panicking the
	 * kernel. The kernel offset will be revealed in the panic so we're
	 * also safe to reveal the hyp offset as a debugging aid for
	 * translating hyp VAs to vmlinux addresses.
	 */
	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
}