/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

#define KVM_POSSIBLE_CR0_GUEST_BITS	(X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

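/*
 * Assumed rationale (not stated in the original source): CR0 bits that
 * factor into the PDPTRs must never be guest-owned, as toggling such a
 * bit would then not cause a VM-Exit and KVM would miss reloading the
 * cached PDPTRs, hence the assertion below.
 */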
static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));

#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
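
/*
 * Illustrative expansion (mechanical, per the macro above):
 * BUILD_KVM_GPR_ACCESSORS(rax, RAX) generates
 *
 *	static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
 *	{
 *		return vcpu->arch.regs[VCPU_REGS_RAX];
 *	}
 *	static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
 *						  unsigned long val)
 *	{
 *		vcpu->arch.regs[VCPU_REGS_RAX] = val;
 *	}
 *
 * Note the generated accessors read/write vcpu->arch.regs[] directly and
 * do not touch the availability/dirty tracking below.
 */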
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

/*
 * Using the register cache from interrupt context is generally not allowed, as
 * caching a register and marking it available/dirty can't be done atomically,
 * i.e. accesses from interrupt context may clobber state or read stale data if
 * the vCPU task is in the process of updating the cache.  The exception is if
 * KVM is handling a PMI IRQ/NMI VM-Exit, as that bound code sequence doesn't
 * touch the cache, it runs after the cache is reset (post VM-Exit), and PMIs
 * need to access several registers that are cacheable.
 */
#define kvm_assert_register_caching_allowed(vcpu)		\
	lockdep_assert_once(in_task() || kvm_arch_pmi_in_guest(vcpu))
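
/*
 * Minimal sketch of the rule above (hypothetical caller, not from KVM
 * proper): with lockdep enabled, a cached-register access from IRQ
 * context trips the assertion unless a PMI is being handled while the
 * guest's state is live:
 *
 *	static void example_irq_handler(struct kvm_vcpu *vcpu)
 *	{
 *		kvm_rip_read(vcpu);	// !in_task() and no in-guest PMI:
 *					// lockdep_assert_once() fires
 *	}
 *
 * The same read from the vCPU ioctl path runs in task context and is fine.
 */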

/*
 * avail  dirty
 * 0	  0	  register in VMCS/VMCB
 * 0	  1	  *INVALID*
 * 1	  0	  register in vcpu->arch
 * 1	  1	  register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
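
/*
 * Illustrative lifecycle (assumed vendor-code flow, not copied from
 * vmx/svm): after a VM-Exit resets the cache, a ->cache_reg()
 * implementation pulls a register out of hardware state and marks it
 * available; an emulated write additionally dirties it so it is flushed
 * back to the VMCS/VMCB before the next VM-Enter:
 *
 *	// fill: VMCS -> vcpu->arch (avail=1, dirty=0)
 *	vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
 *	kvm_register_mark_available(vcpu, VCPU_REGS_RSP);
 *
 *	// emulated write: store-back needed (avail=1, dirty=1)
 *	vcpu->arch.regs[VCPU_REGS_RSP] = new_rsp;
 *	kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP);
 *
 * avail=0/dirty=1 is the one invalid state, which is why mark_dirty()
 * sets both bits.
 */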

/*
 * kvm_register_test_and_mark_available() is a special snowflake that uses an
 * arch bitop directly to avoid the explicit instrumentation that comes with
 * the generic bitops.  This allows code that cannot be instrumented (noinstr
 * functions), e.g. the low level VM-Enter/VM-Exit paths, to cache registers.
 */
static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
								 enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
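
/*
 * Sketch of the intended noinstr pattern (assumed from the comment above,
 * not copied from the actual VM-Exit path): fill the cache only on first
 * access,
 *
 *	// in a noinstr function, before instrumentation_begin()
 *	if (!kvm_register_test_and_mark_available(vcpu, VCPU_REGS_RIP))
 *		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
 *
 * arch___test_and_set_bit() skips the KASAN/KCSAN hooks that the generic
 * __test_and_set_bit() would emit, keeping the caller noinstr-clean.
 */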

/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other words,
 * odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_call(cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}
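
/*
 * For contrast, a mode-aware wrapper (a sketch in the spirit of the
 * non-raw helpers in arch/x86/kvm/x86.h, name assumed) truncates to
 * 32 bits outside 64-bit mode, which is why the raw variants are rarely
 * what callers want:
 *
 *	static inline unsigned long example_register_read(struct kvm_vcpu *vcpu,
 *							  int reg)
 *	{
 *		unsigned long val = kvm_register_read_raw(vcpu, reg);
 *
 *		return is_64_bit_mode(vcpu) ? val : (u32)val;
 *	}
 */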

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}

static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}

static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr0_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr0_bit));

	return !!kvm_read_cr0_bits(vcpu, cr0_bit);
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}
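
/*
 * Reasoning sketch for the _bits() helpers (applies to the CR4 variants
 * below as well): a control-register read only needs to hit hardware when
 * the queried bit is guest-owned, i.e. the guest can change it without a
 * VM-Exit, and the value hasn't been cached since the last exit.
 * Assuming CR0.TS is currently guest-owned:
 *
 *	kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);	// never guest-owned, no
 *						// ->cache_reg() call
 *	kvm_is_cr0_bit_set(vcpu, X86_CR0_TS);	// may call
 *						// ->cache_reg(VCPU_EXREG_CR0)
 */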

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr4_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr4_bit));

	return !!kvm_read_cr4_bits(vcpu, cr4_bit);
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
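
/*
 * Worked example for kvm_read_edx_eax(): "-1u" is 0xffffffff, so each GPR
 * is truncated to its low 32 bits before RDX is shifted into the high
 * half, mirroring how instructions like RDMSR and RDTSC return a 64-bit
 * result in EDX:EAX.  With RAX = 0xaaaa0000deadbeef and RDX = 0x1:
 *
 *	0xdeadbeef | ((u64)0x1 << 32) == 0x00000001deadbeef
 */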

static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif