v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Debug and Guest Debug support
  4 *
  5 * Copyright (C) 2015 - Linaro Ltd
  6 * Author: Alex Bennée <alex.bennee@linaro.org>
  7 */
  8
  9#include <linux/kvm_host.h>
 10#include <linux/hw_breakpoint.h>
 11
 12#include <asm/debug-monitors.h>
 13#include <asm/kvm_asm.h>
 14#include <asm/kvm_arm.h>
 15#include <asm/kvm_emulate.h>
 16
 17#include "trace.h"
 18
 19/* These are the bits of MDSCR_EL1 we may manipulate */
 20#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
 21				DBG_MDSCR_KDE | \
 22				DBG_MDSCR_MDE)
 23
 24static DEFINE_PER_CPU(u32, mdcr_el2);
 25
 26/**
 27 * save/restore_guest_debug_regs
 28 *
 29 * For some debug operations we need to tweak some guest registers. As
 30 * a result we need to save the state of those registers before we
 31 * make those modifications.
 32 *
 33 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 34 * after we have restored the preserved value to the main context.
 35 */
 36static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
 37{
 38	u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
 39
 40	vcpu->arch.guest_debug_preserved.mdscr_el1 = val;
 41
 42	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
 43				vcpu->arch.guest_debug_preserved.mdscr_el1);
 44}
 45
 46static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
 47{
 48	u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;
 49
 50	vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);
 51
 52	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
 53				vcpu_read_sys_reg(vcpu, MDSCR_EL1));
 54}
 55
 56/**
 57 * kvm_arm_init_debug - grab what we need for debug
 58 *
 59 * Currently the sole task of this function is to retrieve the initial
 60 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
 61 * presumably been set-up by some knowledgeable bootcode.
 62 *
 63 * It is called once per-cpu during CPU hyp initialisation.
 64 */
 65
 66void kvm_arm_init_debug(void)
 67{
 68	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
 69}
 70
 71/**
 72 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 73 */
 74
 75void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
 76{
 77	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
 78}
 79
 80/**
 81 * kvm_arm_setup_debug - set up debug related stuff
 82 *
 83 * @vcpu:	the vcpu pointer
 84 *
 85 * This is called before each entry into the hypervisor to setup any
 86 * debug related registers. Currently this just ensures we will trap
 87 * access to:
 88 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 89 *  - Debug ROM Address (MDCR_EL2_TDRA)
 90 *  - OS related registers (MDCR_EL2_TDOSA)
 91 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 92 *
 93 * Additionally, KVM only traps guest accesses to the debug registers if
 94 * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
 95 * flag on vcpu->arch.flags).  Since the guest must not interfere
 96 * with the hardware state when debugging the guest, we must ensure that
 97 * trapping is enabled whenever we are debugging the guest using the
 98 * debug registers.
 99 */
100
101void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
102{
103	bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
104	unsigned long mdscr;
105
106	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
107
108	/*
109	 * This also clears MDCR_EL2_E2PB_MASK to disable guest access
110	 * to the profiling buffer.
111	 */
112	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
113	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
114				MDCR_EL2_TPMS |
115				MDCR_EL2_TPMCR |
116				MDCR_EL2_TDRA |
117				MDCR_EL2_TDOSA);
118
119	/* Is Guest debugging in effect? */
120	if (vcpu->guest_debug) {
121		/* Route all software debug exceptions to EL2 */
122		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
123
124		/* Save guest debug state */
125		save_guest_debug_regs(vcpu);
126
127		/*
128		 * Single Step (ARM ARM D2.12.3 The software step state
129		 * machine)
130		 *
131		 * If we are doing Single Step we need to manipulate
132		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
133		 * step has occurred the hypervisor will trap the
134		 * debug exception and we return to userspace.
135		 *
136		 * If the guest attempts to single step its userspace
137		 * we would have to deal with a trapped exception
138		 * while in the guest kernel. Because this would be
139		 * hard to unwind we suppress the guest's ability to
140		 * do so by masking MDSCR_EL1.SS.
141		 *
142		 * This confuses guest debuggers which use
143		 * single-step behind the scenes but everything
144		 * returns to normal once the host is no longer
145		 * debugging the system.
146		 */
147		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
148			*vcpu_cpsr(vcpu) |=  DBG_SPSR_SS;
149			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
150			mdscr |= DBG_MDSCR_SS;
151			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
152		} else {
153			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
154			mdscr &= ~DBG_MDSCR_SS;
155			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
156		}
157
158		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));
159
160		/*
161		 * HW Breakpoints and watchpoints
162		 *
163		 * We simply switch the debug_ptr to point to our new
164		 * external_debug_state which has been populated by the
165		 * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
166		 * mechanism ensures the registers are updated on the
167		 * world switch.
168		 */
169		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
170			/* Enable breakpoints/watchpoints */
171			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
172			mdscr |= DBG_MDSCR_MDE;
173			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
174
175			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
176			vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
177			trap_debug = true;
178
179			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
180						&vcpu->arch.debug_ptr->dbg_bcr[0],
181						&vcpu->arch.debug_ptr->dbg_bvr[0]);
182
183			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
184						&vcpu->arch.debug_ptr->dbg_wcr[0],
185						&vcpu->arch.debug_ptr->dbg_wvr[0]);
186		}
187	}
188
189	BUG_ON(!vcpu->guest_debug &&
190		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
191
192	/* Trap debug register access */
193	if (trap_debug)
194		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
195
196	/* If KDE or MDE are set, perform a full save/restore cycle. */
197	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
198		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
199
200	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
201	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
202}
203
204void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
205{
206	trace_kvm_arm_clear_debug(vcpu->guest_debug);
207
208	if (vcpu->guest_debug) {
209		restore_guest_debug_regs(vcpu);
210
211		/*
212		 * If we were using HW debug we need to restore the
213		 * debug_ptr to the guest debug state.
214		 */
215		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
216			kvm_arm_reset_debug_ptr(vcpu);
217
218			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
219						&vcpu->arch.debug_ptr->dbg_bcr[0],
220						&vcpu->arch.debug_ptr->dbg_bvr[0]);
221
222			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
223						&vcpu->arch.debug_ptr->dbg_wcr[0],
224						&vcpu->arch.debug_ptr->dbg_wvr[0]);
225		}
226	}
227}
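
The listing above is driven entirely by the KVM_SET_GUEST_DEBUG ioctl: vcpu->guest_debug holds the control flags userspace passed in, and kvm_arm_setup_debug() applies them before each guest entry. Below is a minimal userspace sketch (not part of the kernel sources) of the single-step path; it assumes a vcpu fd has already been created and omits error handling and the surrounding KVM_RUN loop.

#include <linux/kvm.h>      /* struct kvm_guest_debug, KVM_GUESTDBG_* */
#include <string.h>
#include <sys/ioctl.h>

/* Enable single-step guest debug on an existing vcpu fd (sketch). */
static int enable_single_step(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

	/*
	 * On the next KVM_RUN, kvm_arm_setup_debug() sees
	 * KVM_GUESTDBG_SINGLESTEP, sets PSTATE.SS and MDSCR_EL1.SS, and
	 * routes the resulting debug exception to EL2 via MDCR_EL2.TDE,
	 * so the step comes back to userspace as a KVM_EXIT_DEBUG exit.
	 */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
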
v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Debug and Guest Debug support
  4 *
  5 * Copyright (C) 2015 - Linaro Ltd
  6 * Author: Alex Bennée <alex.bennee@linaro.org>
  7 */
  8
  9#include <linux/kvm_host.h>
 10#include <linux/hw_breakpoint.h>
 11
 12#include <asm/debug-monitors.h>
 13#include <asm/kvm_asm.h>
 14#include <asm/kvm_arm.h>
 15#include <asm/kvm_emulate.h>
 16
 17#include "trace.h"
 18
 19/* These are the bits of MDSCR_EL1 we may manipulate */
 20#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
 21				DBG_MDSCR_KDE | \
 22				DBG_MDSCR_MDE)
 23
 24static DEFINE_PER_CPU(u64, mdcr_el2);
 25
 26/**
 27 * save/restore_guest_debug_regs
 28 *
 29 * For some debug operations we need to tweak some guest registers. As
 30 * a result we need to save the state of those registers before we
 31 * make those modifications.
 32 *
 33 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 34 * after we have restored the preserved value to the main context.
 35 *
 36 * When single-step is enabled by userspace, we tweak PSTATE.SS on every
 37 * guest entry. Preserve PSTATE.SS so we can restore the original value
 38 * for the vcpu after the single-step is disabled.
 39 */
 40static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
 41{
 42	u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
 43
 44	vcpu->arch.guest_debug_preserved.mdscr_el1 = val;
 45
 46	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
 47				vcpu->arch.guest_debug_preserved.mdscr_el1);
 48
 49	vcpu->arch.guest_debug_preserved.pstate_ss =
 50					(*vcpu_cpsr(vcpu) & DBG_SPSR_SS);
 51}
 52
 53static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
 54{
 55	u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;
 56
 57	vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);
 58
 59	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
 60				vcpu_read_sys_reg(vcpu, MDSCR_EL1));
 61
 62	if (vcpu->arch.guest_debug_preserved.pstate_ss)
 63		*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
 64	else
 65		*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
 66}
 67
 68/**
 69 * kvm_arm_init_debug - grab what we need for debug
 70 *
 71 * Currently the sole task of this function is to retrieve the initial
 72 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
 73 * presumably been set-up by some knowledgeable bootcode.
 74 *
 75 * It is called once per-cpu during CPU hyp initialisation.
 76 */
 77
 78void kvm_arm_init_debug(void)
 79{
 80	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
 81}
 82
 83/**
 84 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 85 *
 86 * @vcpu:	the vcpu pointer
 87 *
 88 * This ensures we will trap access to:
 89 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 90 *  - Debug ROM Address (MDCR_EL2_TDRA)
 91 *  - OS related registers (MDCR_EL2_TDOSA)
 92 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 93 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 94 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 95 */
 96static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
 97{
 98	/*
 99	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
100	 * to disable guest access to the profiling and trace buffers
101	 */
102	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
103	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
104				MDCR_EL2_TPMS |
105				MDCR_EL2_TTRF |
106				MDCR_EL2_TPMCR |
107				MDCR_EL2_TDRA |
108				MDCR_EL2_TDOSA);
109
110	/* Is the VM being debugged by userspace? */
111	if (vcpu->guest_debug)
112		/* Route all software debug exceptions to EL2 */
113		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
114
115	/*
116	 * Trap debug register access when one of the following is true:
117	 *  - Userspace is using the hardware to debug the guest
118	 *  (KVM_GUESTDBG_USE_HW is set).
119	 *  - The guest is not using debug (DEBUG_DIRTY clear).
120	 *  - The guest has enabled the OS Lock (debug exceptions are blocked).
121	 */
122	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
123	    !vcpu_get_flag(vcpu, DEBUG_DIRTY) ||
124	    kvm_vcpu_os_lock_enabled(vcpu))
125		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
126
127	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
128}
129
130/**
131 * kvm_arm_vcpu_init_debug - setup vcpu debug traps
132 *
133 * @vcpu:	the vcpu pointer
134 *
135 * Set vcpu initial mdcr_el2 value.
136 */
137void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
138{
139	preempt_disable();
140	kvm_arm_setup_mdcr_el2(vcpu);
141	preempt_enable();
142}
143
144/**
145 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
146 */
147
148void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
149{
150	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
151}
152
153/**
154 * kvm_arm_setup_debug - set up debug related stuff
155 *
156 * @vcpu:	the vcpu pointer
157 *
158 * This is called before each entry into the hypervisor to setup any
159 * debug related registers.
160 *
161 * Additionally, KVM only traps guest accesses to the debug registers if
162 * the guest is not actively using them (see the DEBUG_DIRTY
163 * flag on vcpu->arch.iflags).  Since the guest must not interfere
164 * with the hardware state when debugging the guest, we must ensure that
165 * trapping is enabled whenever we are debugging the guest using the
166 * debug registers.
167 */
168
169void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
170{
171	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
172
173	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
174
175	kvm_arm_setup_mdcr_el2(vcpu);
176
177	/* Check if we need to use the debug registers. */
178	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
179		/* Save guest debug state */
180		save_guest_debug_regs(vcpu);
181
182		/*
183		 * Single Step (ARM ARM D2.12.3 The software step state
184		 * machine)
185		 *
186		 * If we are doing Single Step we need to manipulate
187		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
188		 * step has occurred the hypervisor will trap the
189		 * debug exception and we return to userspace.
190		 *
191		 * If the guest attempts to single step its userspace
192		 * we would have to deal with a trapped exception
193		 * while in the guest kernel. Because this would be
194		 * hard to unwind we suppress the guest's ability to
195		 * do so by masking MDSCR_EL1.SS.
196		 *
197		 * This confuses guest debuggers which use
198		 * single-step behind the scenes but everything
199		 * returns to normal once the host is no longer
200		 * debugging the system.
201		 */
202		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
203			/*
204			 * If the software step state at the last guest exit
205			 * was Active-pending, we don't set DBG_SPSR_SS so
206			 * that the state is maintained (to not run another
207			 * single-step until the pending Software Step
208			 * exception is taken).
209			 */
210			if (!vcpu_get_flag(vcpu, DBG_SS_ACTIVE_PENDING))
211				*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
212			else
213				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
214
215			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
216			mdscr |= DBG_MDSCR_SS;
217			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
218		} else {
219			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
220			mdscr &= ~DBG_MDSCR_SS;
221			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
222		}
223
224		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));
225
226		/*
227		 * HW Breakpoints and watchpoints
228		 *
229		 * We simply switch the debug_ptr to point to our new
230		 * external_debug_state which has been populated by the
231		 * debug ioctl. The existing DEBUG_DIRTY mechanism ensures
232		 * the registers are updated on the world switch.
233		 */
234		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
235			/* Enable breakpoints/watchpoints */
236			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
237			mdscr |= DBG_MDSCR_MDE;
238			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
239
240			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
241			vcpu_set_flag(vcpu, DEBUG_DIRTY);
242
243			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
244						&vcpu->arch.debug_ptr->dbg_bcr[0],
245						&vcpu->arch.debug_ptr->dbg_bvr[0]);
246
247			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
248						&vcpu->arch.debug_ptr->dbg_wcr[0],
249						&vcpu->arch.debug_ptr->dbg_wvr[0]);
250
251		/*
252		 * The OS Lock blocks debug exceptions in all ELs when it is
253		 * enabled. If the guest has enabled the OS Lock, constrain its
254		 * effects to the guest. Emulate the behavior by clearing
255		 * MDSCR_EL1.MDE. In so doing, we ensure that host debug
256		 * exceptions are unaffected by guest configuration of the OS
257		 * Lock.
258		 */
259		} else if (kvm_vcpu_os_lock_enabled(vcpu)) {
260			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
261			mdscr &= ~DBG_MDSCR_MDE;
262			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
263		}
264	}
265
266	BUG_ON(!vcpu->guest_debug &&
267		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
268
269	/* If KDE or MDE are set, perform a full save/restore cycle. */
270	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
271		vcpu_set_flag(vcpu, DEBUG_DIRTY);
272
273	/* Write mdcr_el2 changes since vcpu_load on VHE systems */
274	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
275		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
276
277	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
278}
279
280void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
281{
282	trace_kvm_arm_clear_debug(vcpu->guest_debug);
283
284	/*
285	 * Restore the guest's debug registers if we were using them.
286	 */
287	if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) {
288		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
289			if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS))
290				/*
291				 * Mark the vcpu as ACTIVE_PENDING
292				 * until Software Step exception is taken.
293				 */
294				vcpu_set_flag(vcpu, DBG_SS_ACTIVE_PENDING);
295		}
296
297		restore_guest_debug_regs(vcpu);
298
299		/*
300		 * If we were using HW debug we need to restore the
301		 * debug_ptr to the guest debug state.
302		 */
303		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
304			kvm_arm_reset_debug_ptr(vcpu);
305
306			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
307						&vcpu->arch.debug_ptr->dbg_bcr[0],
308						&vcpu->arch.debug_ptr->dbg_bvr[0]);
309
310			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
311						&vcpu->arch.debug_ptr->dbg_wcr[0],
312						&vcpu->arch.debug_ptr->dbg_wvr[0]);
313		}
314	}
315}
316
317void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
318{
319	u64 dfr0;
320
321	/* For VHE, there is nothing to do */
322	if (has_vhe())
323		return;
324
325	dfr0 = read_sysreg(id_aa64dfr0_el1);
326	/*
327	 * If SPE is present on this CPU and is available at current EL,
328	 * we may need to check if the host state needs to be saved.
329	 */
330	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) &&
331	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
332		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_SPE);
333
334	/* Check if we have TRBE implemented and available at the host */
335	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
336	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
337		vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
338}
339
340void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
341{
342	vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_SPE);
343	vcpu_clear_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
344}
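
For the KVM_GUESTDBG_USE_HW path in both versions, userspace supplies the hardware breakpoint and watchpoint registers in the arch portion of struct kvm_guest_debug; kvm_arm_setup_debug() then points debug_ptr at external_debug_state and marks the debug state dirty so the world-switch code loads those values into hardware. The sketch below (not from the kernel tree) installs a single instruction breakpoint. It assumes the KVM_GUESTDBG_USE_HW flag checked in the listings is exposed to userspace by the arm64 uapi headers, and the DBGBCR encoding shown (0x1e7: E=1, PMC=0b11, BAS=0xf) is the plain unlinked address-match form; check the Arm ARM before relying on it.

#include <linux/kvm.h>      /* struct kvm_guest_debug{,_arch}, KVM_GUESTDBG_* */
#include <string.h>
#include <sys/ioctl.h>

/* Install one hardware breakpoint at @addr on an existing vcpu fd (sketch). */
static int set_hw_breakpoint(int vcpu_fd, __u64 addr)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;

	/* These land in vcpu->arch.external_debug_state on the next KVM_RUN. */
	dbg.arch.dbg_bvr[0] = addr;
	dbg.arch.dbg_bcr[0] = 0x1e7;	/* E=1, PMC=EL1&EL0, BAS=0xf */

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
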