/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Author: Alex Bennée <alex.bennee@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

/* These are the bits of MDSCR_EL1 we may manipulate */
#define MDSCR_EL1_DEBUG_MASK    (DBG_MDSCR_SS | \
                                 DBG_MDSCR_KDE | \
                                 DBG_MDSCR_MDE)

static DEFINE_PER_CPU(u32, mdcr_el2);

/**
 * save/restore_guest_debug_regs
 *
 * For some debug operations we need to tweak some guest registers. As
 * a result we need to save the state of those registers before we
 * make those modifications.
 *
 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 * after we have restored the preserved value to the main context.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
        u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

        vcpu->arch.guest_debug_preserved.mdscr_el1 = val;

        trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
                                 vcpu->arch.guest_debug_preserved.mdscr_el1);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
        u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

        vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);

        trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
                                 vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

/**
 * kvm_arm_init_debug - grab what we need for debug
 *
 * Currently the sole task of this function is to retrieve the initial
 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
 * presumably been set-up by some knowledgeable bootcode.
 *
 * It is called once per-cpu during CPU hyp initialisation.
 */

void kvm_arm_init_debug(void)
{
        __this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2));
}

/**
 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 */

void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
}

/**
 * kvm_arm_setup_debug - set up debug related stuff
 *
 * @vcpu: the vcpu pointer
 *
 * This is called before each entry into the hypervisor to setup any
 * debug related registers. Currently this just ensures we will trap
 * access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
 * flag on vcpu->arch.debug_flags). Since the guest must not interfere
 * with the hardware state when debugging the guest, we must ensure that
 * trapping is enabled whenever we are debugging the guest using the
 * debug registers.
 */

void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
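        /*
         * By default we trap guest accesses to the debug registers. If
         * the guest is already actively using them (KVM_ARM64_DEBUG_DIRTY
         * is set) the traps stay off so its hardware state remains live.
         */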
        bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY);
        unsigned long mdscr;

        trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

        /*
         * This also clears MDCR_EL2_E2PB_MASK to disable guest access
         * to the profiling buffer.
         */
        vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
        vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
                                MDCR_EL2_TPMS |
                                MDCR_EL2_TPMCR |
                                MDCR_EL2_TDRA |
                                MDCR_EL2_TDOSA);

        /* Is Guest debugging in effect? */
        if (vcpu->guest_debug) {
                /* Route all software debug exceptions to EL2 */
                vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

                /* Save guest debug state */
                save_guest_debug_regs(vcpu);

                /*
                 * Single Step (ARM ARM D2.12.3 The software step state
                 * machine)
                 *
                 * If we are doing Single Step we need to manipulate
                 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
                 * step has occurred the hypervisor will trap the
                 * debug exception and we return to userspace.
                 *
                 * If the guest attempts to single step its userspace
                 * we would have to deal with a trapped exception
                 * while in the guest kernel. Because this would be
                 * hard to unwind we suppress the guest's ability to
                 * do so by masking MDSCR_EL1.SS.
                 *
                 * This confuses guest debuggers which use
                 * single-step behind the scenes but everything
                 * returns to normal once the host is no longer
                 * debugging the system.
                 */
                if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                        *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
                        mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
                        mdscr |= DBG_MDSCR_SS;
                        vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
                } else {
                        mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
                        mdscr &= ~DBG_MDSCR_SS;
                        vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
                }

                trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));

                /*
                 * HW Breakpoints and watchpoints
                 *
                 * We simply switch the debug_ptr to point to our new
                 * external_debug_state which has been populated by the
                 * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
                 * mechanism ensures the registers are updated on the
                 * world switch.
                 */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
                        /* Enable breakpoints/watchpoints */
                        mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
                        mdscr |= DBG_MDSCR_MDE;
                        vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

                        vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
                        vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
                        trap_debug = true;

                        trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
                                                 &vcpu->arch.debug_ptr->dbg_bcr[0],
                                                 &vcpu->arch.debug_ptr->dbg_bvr[0]);

                        trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
                                                 &vcpu->arch.debug_ptr->dbg_wcr[0],
                                                 &vcpu->arch.debug_ptr->dbg_wvr[0]);
                }
        }

        BUG_ON(!vcpu->guest_debug &&
                vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);

        /* Trap debug register access */
        if (trap_debug)
                vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

        /* If KDE or MDE are set, perform a full save/restore cycle. */
        if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
                vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;

        trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
        trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
{
        trace_kvm_arm_clear_debug(vcpu->guest_debug);

        if (vcpu->guest_debug) {
                restore_guest_debug_regs(vcpu);

                /*
                 * If we were using HW debug we need to restore the
                 * debug_ptr to the guest debug state.
                 */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
                        kvm_arm_reset_debug_ptr(vcpu);

                        trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
                                                 &vcpu->arch.debug_ptr->dbg_bcr[0],
                                                 &vcpu->arch.debug_ptr->dbg_bvr[0]);

                        trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
                                                 &vcpu->arch.debug_ptr->dbg_wcr[0],
                                                 &vcpu->arch.debug_ptr->dbg_wvr[0]);
                }
        }
}


/*
 * After successfully emulating an instruction, we might want to
 * return to user space with a KVM_EXIT_DEBUG. We can only do this
 * once the emulation is complete, though, so for userspace emulations
 * we have to wait until we have re-entered KVM before calling this
 * helper.
 *
 * Return true (and set exit_reason) to return to userspace or false
 * if no further action is required.
 */
bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                run->exit_reason = KVM_EXIT_DEBUG;
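                /*
                 * Report a syndrome of "Software Step exception taken
                 * from a lower Exception level" so that userspace (e.g.
                 * a GDB stub) can tell this debug exit apart from
                 * breakpoint and watchpoint hits.
                 */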
                run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
                return true;
        }
        return false;
}

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Author: Alex Bennée <alex.bennee@linaro.org>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

/* These are the bits of MDSCR_EL1 we may manipulate */
#define MDSCR_EL1_DEBUG_MASK    (DBG_MDSCR_SS | \
                                 DBG_MDSCR_KDE | \
                                 DBG_MDSCR_MDE)

static DEFINE_PER_CPU(u64, mdcr_el2);

/**
 * save/restore_guest_debug_regs
 *
 * For some debug operations we need to tweak some guest registers. As
 * a result we need to save the state of those registers before we
 * make those modifications.
 *
 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 * after we have restored the preserved value to the main context.
 *
 * When single-step is enabled by userspace, we tweak PSTATE.SS on every
 * guest entry. Preserve PSTATE.SS so we can restore the original value
 * for the vcpu after the single-step is disabled.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
        u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

        vcpu->arch.guest_debug_preserved.mdscr_el1 = val;

        trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
                                 vcpu->arch.guest_debug_preserved.mdscr_el1);

        vcpu->arch.guest_debug_preserved.pstate_ss =
                                        (*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
        u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

        vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);

        trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
                                 vcpu_read_sys_reg(vcpu, MDSCR_EL1));

        if (vcpu->arch.guest_debug_preserved.pstate_ss)
                *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
        else
                *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/**
 * kvm_arm_init_debug - grab what we need for debug
 *
 * Currently the sole task of this function is to retrieve the initial
 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
 * presumably been set-up by some knowledgeable bootcode.
 *
 * It is called once per-cpu during CPU hyp initialisation.
 */

void kvm_arm_init_debug(void)
{
        __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu: the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
        /*
         * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
         * to disable guest access to the profiling and trace buffers
         */
        vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
        vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
                                MDCR_EL2_TPMS |
                                MDCR_EL2_TTRF |
                                MDCR_EL2_TPMCR |
                                MDCR_EL2_TDRA |
                                MDCR_EL2_TDOSA);

        /* Is the VM being debugged by userspace? */
        if (vcpu->guest_debug)
                /* Route all software debug exceptions to EL2 */
                vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

        /*
         * Trap debug register access when one of the following is true:
         *  - Userspace is using the hardware to debug the guest
         *    (KVM_GUESTDBG_USE_HW is set).
         *  - The guest is not using debug (DEBUG_DIRTY clear).
         *  - The guest has enabled the OS Lock (debug exceptions are blocked).
         */
        if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
            !vcpu_get_flag(vcpu, DEBUG_DIRTY) ||
            kvm_vcpu_os_lock_enabled(vcpu))
                vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

        trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
}

/**
 * kvm_arm_vcpu_init_debug - setup vcpu debug traps
 *
 * @vcpu: the vcpu pointer
 *
 * Set vcpu initial mdcr_el2 value.
 */
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
{
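        /*
         * kvm_arm_setup_mdcr_el2() reads the host's per-CPU mdcr_el2
         * value, so keep the vcpu on this physical CPU while the trap
         * configuration is computed.
         */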
        preempt_disable();
        kvm_arm_setup_mdcr_el2(vcpu);
        preempt_enable();
}

/**
 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 */

void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
}

/**
 * kvm_arm_setup_debug - set up debug related stuff
 *
 * @vcpu: the vcpu pointer
 *
 * This is called before each entry into the hypervisor to setup any
 * debug related registers.
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the DEBUG_DIRTY flag on
 * vcpu->arch.iflags). Since the guest must not interfere with the
 * hardware state when debugging the guest, we must ensure that trapping
 * is enabled whenever we are debugging the guest using the debug
 * registers.
 */

void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
        unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;

        trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

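        /*
         * Recompute the trap configuration: vcpu->guest_debug and the
         * guest's use of the debug registers may have changed since
         * mdcr_el2 was last set up.
         */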
        kvm_arm_setup_mdcr_el2(vcpu);

        /* Check if we need to use the debug registers. */
        if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
                /* Save guest debug state */
                save_guest_debug_regs(vcpu);

                /*
                 * Single Step (ARM ARM D2.12.3 The software step state
                 * machine)
                 *
                 * If we are doing Single Step we need to manipulate
                 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
                 * step has occurred the hypervisor will trap the
                 * debug exception and we return to userspace.
                 *
                 * If the guest attempts to single step its userspace
                 * we would have to deal with a trapped exception
                 * while in the guest kernel. Because this would be
                 * hard to unwind we suppress the guest's ability to
                 * do so by masking MDSCR_EL1.SS.
                 *
                 * This confuses guest debuggers which use
                 * single-step behind the scenes but everything
                 * returns to normal once the host is no longer
                 * debugging the system.
                 */
                if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                        /*
                         * If the software step state at the last guest exit
                         * was Active-pending, we don't set DBG_SPSR_SS so
                         * that the state is maintained (to not run another
                         * single-step until the pending Software Step
                         * exception is taken).
                         */
                        if (!vcpu_get_flag(vcpu, DBG_SS_ACTIVE_PENDING))
                                *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
                        else
                                *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;

                        mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
                        mdscr |= DBG_MDSCR_SS;
                        vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
                } else {
                        mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
                        mdscr &= ~DBG_MDSCR_SS;
                        vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
                }

                trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));

                /*
                 * HW Breakpoints and watchpoints
                 *
                 * We simply switch the debug_ptr to point to our new
                 * external_debug_state which has been populated by the
                 * debug ioctl. The existing DEBUG_DIRTY mechanism ensures
                 * the registers are updated on the world switch.
                 */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
                        /* Enable breakpoints/watchpoints */
                        mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
                        mdscr |= DBG_MDSCR_MDE;
                        vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

                        vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
                        vcpu_set_flag(vcpu, DEBUG_DIRTY);

                        trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
                                                 &vcpu->arch.debug_ptr->dbg_bcr[0],
                                                 &vcpu->arch.debug_ptr->dbg_bvr[0]);

                        trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
                                                 &vcpu->arch.debug_ptr->dbg_wcr[0],
                                                 &vcpu->arch.debug_ptr->dbg_wvr[0]);

                /*
                 * The OS Lock blocks debug exceptions in all ELs when it is
                 * enabled. If the guest has enabled the OS Lock, constrain its
                 * effects to the guest. Emulate the behavior by clearing
                 * MDSCR_EL1.MDE. In so doing, we ensure that host debug
                 * exceptions are unaffected by guest configuration of the OS
                 * Lock.
                 */
                } else if (kvm_vcpu_os_lock_enabled(vcpu)) {
                        mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
                        mdscr &= ~DBG_MDSCR_MDE;
                        vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
                }
        }

        BUG_ON(!vcpu->guest_debug &&
                vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);

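        /*
         * KDE/MDE in MDSCR_EL1 enable debug exceptions, so when either is
         * set the guest's hardware debug registers are live and must be
         * context switched along with the rest of the debug state.
         */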
        /* If KDE or MDE are set, perform a full save/restore cycle. */
        if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
                vcpu_set_flag(vcpu, DEBUG_DIRTY);

        /* Write mdcr_el2 changes since vcpu_load on VHE systems */
        if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
                write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

        trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
{
        trace_kvm_arm_clear_debug(vcpu->guest_debug);

        /*
         * Restore the guest's debug registers if we were using them.
         */
        if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
                if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
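                        /*
                         * PSTATE.SS clear at this point means the software
                         * step state machine is Active-pending: the Software
                         * Step exception has not been taken yet.
                         */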
                        if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
                                /*
                                 * Mark the vcpu as ACTIVE_PENDING
                                 * until Software Step exception is taken.
                                 */
                                vcpu_set_flag(vcpu, DBG_SS_ACTIVE_PENDING);
                }

                restore_guest_debug_regs(vcpu);

                /*
                 * If we were using HW debug we need to restore the
                 * debug_ptr to the guest debug state.
                 */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
                        kvm_arm_reset_debug_ptr(vcpu);

                        trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
                                                 &vcpu->arch.debug_ptr->dbg_bcr[0],
                                                 &vcpu->arch.debug_ptr->dbg_bvr[0]);

                        trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
                                                 &vcpu->arch.debug_ptr->dbg_wcr[0],
                                                 &vcpu->arch.debug_ptr->dbg_wvr[0]);
                }
        }
}

void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
{
        u64 dfr0;

        /* For VHE, there is nothing to do */
        if (has_vhe())
                return;

        dfr0 = read_sysreg(id_aa64dfr0_el1);
        /*
         * If SPE is present on this CPU and is available at current EL,
         * we may need to check if the host state needs to be saved.
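         * PMBIDR_EL1.P set means the profiling buffer is owned by a
         * higher exception level, in which case the host is not using
         * SPE here and there is no buffer state to save.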
         */
        if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
            !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(PMBIDR_EL1_P_SHIFT)))
                vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);

        /* Check if we have TRBE implemented and available at the host */
        if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
            !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
                vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}

void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
{
        vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_SPE);
        vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}