v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

#define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
				 PSR_I_BIT | PSR_D_BIT)

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
{
	u64 exc_offset;

	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
	case PSR_MODE_EL1t:
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
		break;
	case PSR_MODE_EL1h:
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
		break;
	case PSR_MODE_EL0t:
		exc_offset = LOWER_EL_AArch64_VECTOR;
		break;
	default:
		exc_offset = LOWER_EL_AArch32_VECTOR;
	}

	return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}
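
/*
 * Illustrative worked example (not part of the original file): a guest
 * running at EL1 on SP_EL1 (PSR_MODE_EL1h) that takes a synchronous
 * exception is vectored to
 * VBAR_EL1 + CURRENT_EL_SP_ELx_VECTOR + except_type_sync, i.e.
 * VBAR_EL1 + 0x200; an SError taken from AArch32 EL0 lands at
 * VBAR_EL1 + 0x600 + 0x180 instead.
 */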

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	vcpu_write_spsr(vcpu, cpsr);

	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
}
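
/*
 * Illustrative worked example (not part of the original file): the EC
 * encodings are chosen so that the final OR above folds correctly.
 * For a data abort taken from the guest's EL1,
 * ESR_ELx_EC_IABT_CUR (0x21) | ESR_ELx_EC_DABT_LOW (0x24) yields
 * ESR_ELx_EC_DABT_CUR (0x25), so with a 32-bit instruction length the
 * guest observes
 *
 *	ESR_EL1 = (0x25 << 26) | ESR_ELx_IL | ESR_ELx_FSC_EXTABT
 *		= 0x96000010;
 */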

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	vcpu_write_spsr(vcpu, cpsr);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_dabt32(vcpu, addr);
	else
		inject_abt64(vcpu, false, addr);
}
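
/*
 * Usage sketch (hypothetical caller, not from this file): an MMIO
 * emulation path that cannot satisfy a guest access could report it
 * back as an external data abort at the faulting address:
 *
 *	if (!handle_mmio_access(vcpu))		// hypothetical helper
 *		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 */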

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_pabt32(vcpu, addr);
	else
		inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}
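
/*
 * Usage sketch (hypothetical caller, not from this file): a trapped
 * system-register handler that recognises no valid encoding can punt
 * the problem back to the guest as an undefined instruction:
 *
 *	if (!find_reg(params))			// hypothetical lookup
 *		kvm_inject_undefined(vcpu);
 */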

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}
v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

/*
 * This performs the exception entry at a given EL (@target_mode), stashing PC
 * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
 * The EL passed to this function *must* be a non-secure, privileged mode with
 * bit 0 being set (PSTATE.SP == 1).
 *
 * When an exception is taken, most PSTATE fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
 * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
 * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
 *
 * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
 * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
 *
 * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
 * MSB to LSB.
 */
static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
			      enum exception_type type)
{
	unsigned long sctlr, vbar, old, new, mode;
	u64 exc_offset;

	mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);

	if      (mode == target_mode)
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
	else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
	else if (!(mode & PSR_MODE32_BIT))
		exc_offset = LOWER_EL_AArch64_VECTOR;
	else
		exc_offset = LOWER_EL_AArch32_VECTOR;

	switch (target_mode) {
	case PSR_MODE_EL1h:
		vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
		sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
		break;
	default:
		/* Don't do that */
		BUG();
	}

	*vcpu_pc(vcpu) = vbar + exc_offset + type;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_N_BIT);
	new |= (old & PSR_Z_BIT);
	new |= (old & PSR_C_BIT);
	new |= (old & PSR_V_BIT);

	// TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)

	new |= (old & PSR_DIT_BIT);

	// PSTATE.UAO is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D5-2579.

	// PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
	// SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page D5-2578.
	new |= (old & PSR_PAN_BIT);
	if (!(sctlr & SCTLR_EL1_SPAN))
		new |= PSR_PAN_BIT;

	// PSTATE.SS is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D2-2452.

	// PSTATE.IL is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D1-2306.

	// PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
	// See ARM DDI 0487E.a, page D13-3258
	if (sctlr & SCTLR_ELx_DSSBS)
		new |= PSR_SSBS_BIT;

	// PSTATE.BTYPE is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, pages D1-2293 to D1-2294.

	new |= PSR_D_BIT;
	new |= PSR_A_BIT;
	new |= PSR_I_BIT;
	new |= PSR_F_BIT;

	new |= target_mode;

	*vcpu_cpsr(vcpu) = new;
	vcpu_write_spsr(vcpu, old);
}
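
/*
 * Illustrative worked example (not part of the original file): a sync
 * exception taken from AArch64 EL0t with PSTATE.{N,C} set,
 * SCTLR_EL1.SPAN = 1 and SCTLR_EL1.DSSBS = 0 enters the guest's EL1
 * with
 *
 *	new = PSR_N_BIT | PSR_C_BIT | PSR_D_BIT | PSR_A_BIT |
 *	      PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL1h;
 *
 * while SPSR_EL1 holds the complete old PSTATE and ELR_EL1 the old PC.
 */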

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

	enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);

	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
}
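
/*
 * Illustrative worked example (not part of the original file):
 * ESR_ELx_EC_UNKNOWN is 0, so the injected ESR_EL1 is just
 * ESR_ELx_IL (0x02000000) when the trapped instruction was 32 bits
 * wide, or 0 for a 16-bit T32 instruction.
 */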

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_dabt32(vcpu, addr);
	else
		inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_pabt32(vcpu, addr);
	else
		inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}
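
/*
 * Usage sketch (host userspace, illustrative): with
 * KVM_CAP_ARM_INJECT_SERROR_ESR available, a VMM can reach
 * kvm_set_sei_esr() by queuing an SError with an explicit ISS through
 * the KVM_SET_VCPU_EVENTS ioctl; vcpu_fd below is assumed to come from
 * KVM_CREATE_VCPU:
 *
 *	struct kvm_vcpu_events ev = {};
 *
 *	ev.exception.serror_pending = 1;
 *	ev.exception.serror_has_esr = 1;
 *	ev.exception.serror_esr     = 1;	// ISS bits only; example value
 *	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev) < 0)
 *		err(1, "KVM_SET_VCPU_EVENTS");
 *
 * KVM masks the value with ESR_ELx_ISS_MASK before it reaches
 * VSESR_EL2, and HCR_EL2.VSE makes the CPU deliver a virtual SError to
 * the guest at the next opportunity.
 */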