/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

#define KVM_POSSIBLE_CR0_GUEST_BITS	(X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

#define X86_CR0_PDPTR_BITS	(X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS	(X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS	(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));

#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{ \
	return vcpu->arch.regs[VCPU_REGS_##uname]; \
} \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu, \
						unsigned long val) \
{ \
	vcpu->arch.regs[VCPU_REGS_##uname] = val; \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8, R8)
BUILD_KVM_GPR_ACCESSORS(r9, R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif
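
/*
 * Illustrative sketch (editorial, not part of the header proper):
 * BUILD_KVM_GPR_ACCESSORS(rax, RAX) expands to a pair of trivial accessors
 * over vcpu->arch.regs, roughly:
 *
 *	static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
 *	{
 *		return vcpu->arch.regs[VCPU_REGS_RAX];
 *	}
 *	static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
 *						  unsigned long val)
 *	{
 *		vcpu->arch.regs[VCPU_REGS_RAX] = val;
 *	}
 *
 * Note that, as is visible from the macro body, these GPR accessors read and
 * write vcpu->arch.regs directly and do not go through the regs_avail/
 * regs_dirty tracking defined below.
 */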

/*
 * Using the register cache from interrupt context is generally not allowed, as
 * caching a register and marking it available/dirty can't be done atomically,
 * i.e. accesses from interrupt context may clobber state or read stale data if
 * the vCPU task is in the process of updating the cache.  The exception is if
 * KVM is handling a PMI IRQ/NMI VM-Exit, as that bound code sequence doesn't
 * touch the cache, it runs after the cache is reset (post VM-Exit), and PMIs
 * need to access several registers that are cacheable.
 */
#define kvm_assert_register_caching_allowed(vcpu) \
	lockdep_assert_once(in_task() || kvm_arch_pmi_in_guest(vcpu))
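
/*
 * Illustrative sketch: the "can't be done atomically" problem above refers to
 * filling the cache being a two-step sequence, e.g.
 *
 *	vcpu->arch.regs[reg] = <value read from the VMCS/VMCB>;
 *	kvm_register_mark_available(vcpu, reg);
 *
 * An interrupt handler that lands between the two steps and also touches the
 * cache could observe or clobber a half-updated entry, hence the lockdep
 * assertion in the macro above.
 */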

/*
 * avail  dirty
 *   0      0    register in VMCS/VMCB
 *   0      1    *INVALID*
 *   1      0    register in vcpu->arch
 *   1      1    register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
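
/*
 * Illustrative usage sketch, tying the helpers above back to the avail/dirty
 * table:
 *
 *	// Post VM-Exit the cache is reset: state 0/0, the architectural value
 *	// lives only in the VMCS/VMCB.
 *
 *	// Caching a value read out of hardware moves the register to 1/0:
 *	vcpu->arch.regs[VCPU_REGS_RIP] = <value read from the VMCS/VMCB>;
 *	kvm_register_mark_available(vcpu, VCPU_REGS_RIP);
 *
 *	// Modifying the cached copy moves it to 1/1, i.e. vendor code must
 *	// write it back to the VMCS/VMCB before the next VM-Enter:
 *	vcpu->arch.regs[VCPU_REGS_RIP] = new_rip;
 *	kvm_register_mark_dirty(vcpu, VCPU_REGS_RIP);
 */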

/*
 * kvm_register_test_and_mark_available() is a special snowflake that uses an
 * arch bitop directly to avoid the explicit instrumentation that comes with
 * the generic bitops. This allows code that cannot be instrumented (noinstr
 * functions), e.g. the low level VM-Enter/VM-Exit paths, to cache registers.
 */
static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
								 enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
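
/*
 * Illustrative, hypothetical usage sketch for the helper above: a noinstr
 * vendor path can lazily read a field out of hardware exactly once, e.g.
 *
 *	if (!kvm_register_test_and_mark_available(vcpu, reg))
 *		vcpu->arch.regs[reg] = <value read from the VMCS/VMCB>;
 *	val = vcpu->arch.regs[reg];
 *
 * The single test-and-set bitop both checks and marks availability without
 * pulling the instrumentation of a generic test_bit()/__set_bit() pair into a
 * noinstr function.
 */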

/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode. In other words,
 * odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_call(cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}
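
/*
 * Sketch of the mode-aware counterparts the comment above alludes to: the
 * non-raw wrappers live outside this header (in x86.h in recent kernels) and
 * roughly amount to truncating the value when the vCPU is not in 64-bit mode:
 *
 *	val = kvm_register_read_raw(vcpu, reg);
 *	return is_64_bit_mode(vcpu) ? val : (u32)val;
 *
 * i.e. "raw" here simply means "no truncation based on the current vCPU mode".
 */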

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}

static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}

static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr0_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr0_bit));

	return !!kvm_read_cr0_bits(vcpu, cr0_bit);
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr4_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr4_bit));

	return !!kvm_read_cr4_bits(vcpu, cr4_bit);
}
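
/*
 * Illustrative usage sketch: prefer the narrow bit helpers over a full CR
 * read, as the mask decides whether a potentially guest-owned bit must first
 * be decached from the VMCS/VMCB:
 *
 *	// Never hits hardware: PAE is not in KVM_POSSIBLE_CR4_GUEST_BITS, so
 *	// the cached vcpu->arch.cr4 value is authoritative for this bit.
 *	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE))
 *		...
 *
 *	// May trigger a decache of CR4 if TSD is currently guest-owned.
 *	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_TSD))
 *		...
 */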

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
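
/*
 * Illustrative sketch: kvm_read_edx_eax() assembles the EDX:EAX pair that the
 * guest uses to pass 64-bit values to instructions such as WRMSR.  A
 * hypothetical, simplified emulation path would look like:
 *
 *	u32 msr  = kvm_rcx_read(vcpu);		// ECX holds the MSR index
 *	u64 data = kvm_read_edx_eax(vcpu);	// EDX = high 32 bits, EAX = low
 *
 * The "& -1u" truncates each GPR to its low 32 bits, matching how the CPU
 * itself ignores the upper halves of RAX/RDX for these instructions.
 */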

static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif