arch/arm64/kvm/inject_fault.c (v5.4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

#define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
				 PSR_I_BIT | PSR_D_BIT)

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
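
/*
 * Architectural background on how the vector is computed: the AArch64
 * vector table at VBAR_EL1 has four 0x200-byte banks, selected by where
 * the exception comes from (current EL with SP_EL0, current EL with
 * SP_ELx, lower EL using AArch64, lower EL using AArch32). Within a
 * bank, the entry is selected by exception type: sync at +0x0, IRQ at
 * +0x80, FIQ at +0x100, SError at +0x180. A synchronous exception taken
 * from EL1h therefore vectors to VBAR_EL1 + 0x200 + 0x0.
 */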

static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
{
	u64 exc_offset;

	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
	case PSR_MODE_EL1t:
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
		break;
	case PSR_MODE_EL1h:
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
		break;
	case PSR_MODE_EL0t:
		exc_offset = LOWER_EL_AArch64_VECTOR;
		break;
	default:
		exc_offset = LOWER_EL_AArch32_VECTOR;
	}

	return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}

static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	vcpu_write_spsr(vcpu, cpsr);

	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
}
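
/*
 * Worked example of the resulting syndrome (using the mainline constant
 * values ESR_ELx_EC_SHIFT = 26, ESR_ELx_IL = BIT(25),
 * ESR_ELx_EC_IABT_CUR = 0x21, ESR_ELx_FSC_EXTABT = 0x10): an instruction
 * abort taken from EL1 with a 32-bit instruction encodes as
 *
 *	esr = (0x21 << 26) | BIT(25) | 0x10 = 0x86000010
 *
 * i.e. EC = "instruction abort, current EL", IL = 1, FSC = "synchronous
 * external abort". Note how OR-ing ESR_ELx_EC_DABT_LOW (0x24) on top of
 * the IABT class converts it to the matching DABT class, since
 * 0x20 | 0x24 = 0x24 (DABT_LOW) and 0x21 | 0x24 = 0x25 (DABT_CUR).
 */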

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	vcpu_write_spsr(vcpu, cpsr);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
}
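
/*
 * Background: ESR_ELx_EC_UNKNOWN is 0, the "unknown reason" exception
 * class the architecture uses when no more specific class applies; it is
 * how an UNDEFINED instruction exception is reported. Only the IL bit
 * varies here, reflecting the width of the trapped instruction.
 */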

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_dabt32(vcpu, addr);
	else
		inject_abt64(vcpu, false, addr);
}
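
/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 * an exit handler that cannot emulate a guest data access can reflect it
 * back into the guest as a data abort and resume execution.
 *
 *	static int handle_unsupported_access(struct kvm_vcpu *vcpu)
 *	{
 *		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 *		return 1;	/- re-enter the guest -/
 *	}
 */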

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_pabt32(vcpu, addr);
	else
		inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The vCPU in which to inject the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}
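
/*
 * Background on the mechanism: setting HCR_EL2.VSE makes the hardware
 * pend a virtual SError for the guest, delivered once the guest unmasks
 * PSTATE.A; with the RAS Extensions (FEAT_RAS), VSESR_EL2 supplies the
 * syndrome the guest will then observe in its ESR.
 */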

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}
arch/arm64/kvm/inject_fault.c (v6.13.7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>
#include <asm/esr.h>

static void pend_sync_exception(struct kvm_vcpu *vcpu)
{
	/* If not nesting, EL1 is the only possible exception target */
	if (likely(!vcpu_has_nv(vcpu))) {
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
		return;
	}

	/*
	 * With NV, we need to pick between EL1 and EL2. Note that we
	 * never deal with a nesting exception here, hence never
	 * changing context, and the exception itself can be delayed
	 * until the next entry.
	 */
	switch (*vcpu_cpsr(vcpu) & PSR_MODE_MASK) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
		break;
	case PSR_MODE_EL1h:
	case PSR_MODE_EL1t:
		kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
		break;
	case PSR_MODE_EL0t:
		if (vcpu_el2_tge_is_set(vcpu))
			kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
		else
			kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
		break;
	default:
		BUG();
	}
}
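
/*
 * Two points worth noting here (background): kvm_pend_exception() only
 * records the pending exception and its target in the vcpu flags; the
 * actual PSTATE/ELR/SPSR update happens on the next guest entry. And for
 * exceptions coming from EL0 under NV, HCR_EL2.TGE decides the target:
 * when TGE is set, exceptions that would go to EL1 are routed to EL2.
 */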

static bool match_target_el(struct kvm_vcpu *vcpu, unsigned long target)
{
	return (vcpu_get_flag(vcpu, EXCEPT_MASK) == target);
}
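
/*
 * The injection routines below use this helper to check which level the
 * pended exception targets, so that FAR/ESR are written at whichever EL
 * (EL1, or virtual EL2 under NV) will actually take the exception.
 */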
 56
 57static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 58{
 59	unsigned long cpsr = *vcpu_cpsr(vcpu);
 60	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
 61	u64 esr = 0;
 
 
 
 
 
 
 62
 63	pend_sync_exception(vcpu);
 64
 65	/*
 66	 * Build an {i,d}abort, depending on the level and the
 67	 * instruction set. Report an external synchronous abort.
 68	 */
 69	if (kvm_vcpu_trap_il_is32bit(vcpu))
 70		esr |= ESR_ELx_IL;
 71
 72	/*
 73	 * Here, the guest runs in AArch64 mode when in EL1. If we get
 74	 * an AArch32 fault, it means we managed to trap an EL0 fault.
 75	 */
 76	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
 77		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
 78	else
 79		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
 80
 81	if (!is_iabt)
 82		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
 83
 84	esr |= ESR_ELx_FSC_EXTABT;
 85
 86	if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC))) {
 87		vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
 88		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
 89	} else {
 90		vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
 91		vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
 92	}
 93}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	pend_sync_exception(vcpu);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC)))
		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
	else
		vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
}

#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		BIT(9)
#define TTBCR_EAE		BIT(31)

static void inject_undef32(struct kvm_vcpu *vcpu)
{
	kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
{
	u64 far;
	u32 fsr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
		fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

	if (is_pabt) {
		kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
		far &= GENMASK(31, 0);
		far |= (u64)addr << 32;
		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
	} else { /* !iabt */
		kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
		far &= GENMASK(63, 32);
		far |= addr;
		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
	}

	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}
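
/*
 * Background on the FAR layout: for an AArch32 EL1, the 32-bit DFAR maps
 * to FAR_EL1[31:0] and IFAR to FAR_EL1[63:32], which is why the code
 * above preserves one half of FAR_EL1 and replaces the other depending
 * on the abort type.
 */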

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_abt32(vcpu, false, addr);
	else
		inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_abt32(vcpu, true, addr);
	else
		inject_abt64(vcpu, true, addr);
}

void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
{
	unsigned long addr, esr;

	addr  = kvm_vcpu_get_fault_ipa(vcpu);
	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	if (kvm_vcpu_trap_is_iabt(vcpu))
		kvm_inject_pabt(vcpu, addr);
	else
		kvm_inject_dabt(vcpu, addr);

	/*
	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
	 * Size Fault at level 0, as if exceeding PARange.
	 *
	 * Non-LPAE guests will only get the external abort, as there
	 * is no way to describe the ASF.
	 */
	if (vcpu_el1_is_32bit(vcpu) &&
	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
		return;

	esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
	esr &= ~GENMASK_ULL(5, 0);
	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
}
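
/*
 * Rationale (background): the {I,D}FSC field lives in ESR_EL1[5:0], so
 * clearing GENMASK_ULL(5, 0) after the abort has been injected leaves
 * the fault status as 0b000000, the architectural encoding for "address
 * size fault, level 0".
 */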

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The vCPU in which to inject the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}
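
/*
 * A minimal usage sketch (hypothetical caller, for illustration only): a
 * handler for a guest-attributable SError that cannot be described more
 * precisely may simply forward it as a virtual SError.
 *
 *	static int handle_guest_serror(struct kvm_vcpu *vcpu)
 *	{
 *		kvm_inject_vabt(vcpu);
 *		return 1;	/- resume with the SError pending -/
 *	}
 */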