/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>
#include <asm/processor.h>

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
               u32 *ecx, u32 *edx, bool check_limit);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.maxphyaddr;
}

struct cpuid_reg {
        u32 function;
        u32 index;
        int reg;
};

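/*
 * Reverse-CPUID table: for each word of the kernel's x86 feature bitmaps
 * (the CPUID_* indexes from asm/cpufeatures.h), record the CPUID leaf
 * (function/index) and output register the word is built from.  This lets
 * KVM go from an X86_FEATURE_* constant back to the guest CPUID bit that
 * controls it.
 */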
static const struct cpuid_reg reverse_cpuid[] = {
        [CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
        [CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
        [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
        [CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
        [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
        [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
        [CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
        [CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
        [CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
        [CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
        [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
        [CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
        [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
        [CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
        [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
        [CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
};

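/*
 * Map an X86_FEATURE_* constant (word * 32 + bit) to the CPUID leaf and
 * register that hold it.  Features whose word has no reverse-CPUID entry
 * are rejected at build time.
 */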
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
{
        unsigned x86_leaf = x86_feature / 32;

        BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
        BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);

        return reverse_cpuid[x86_leaf];
}

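/*
 * Return a pointer to the guest CPUID register that contains the given
 * feature bit, or NULL if the guest's CPUID lacks the corresponding leaf.
 */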
static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
        struct kvm_cpuid_entry2 *entry;
        const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

        entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
        if (!entry)
                return NULL;

        switch (cpuid.reg) {
        case CPUID_EAX:
                return &entry->eax;
        case CPUID_EBX:
                return &entry->ebx;
        case CPUID_ECX:
                return &entry->ecx;
        case CPUID_EDX:
                return &entry->edx;
        default:
                BUILD_BUG();
                return NULL;
        }
}

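/*
 * Test a feature bit in the guest's CPUID, e.g.
 * guest_cpuid_has(vcpu, X86_FEATURE_MPX).  XSAVE is special-cased: if the
 * host CPU lacks XSAVE, the feature is reported as absent regardless of
 * the guest CPUID entries supplied by userspace.
 */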
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
        int *reg;

        if (x86_feature == X86_FEATURE_XSAVE &&
            !static_cpu_has(X86_FEATURE_XSAVE))
                return false;

        reg = guest_cpuid_get_register(vcpu, x86_feature);
        if (!reg)
                return false;

        return *reg & bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
        int *reg;

        reg = guest_cpuid_get_register(vcpu, x86_feature);
        if (reg)
                *reg &= ~bit(x86_feature);
}

static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0, 0);
        return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
        if (!best)
                return -1;

        return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
        if (!best)
                return -1;

        return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
        if (!best)
                return -1;

        return x86_stepping(best->eax);
}

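/*
 * CPUID faulting: supports_cpuid_fault() reports whether the virtual
 * MSR_PLATFORM_INFO advertises the capability to the guest, and
 * cpuid_fault_enabled() whether the guest has enabled it via
 * MSR_MISC_FEATURES_ENABLES, in which case CPUID executed at CPL > 0
 * should fault.
 */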
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.msr_misc_features_enables &
                  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

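/*
 * kvm_cpu_caps is indexed by the same feature-word scheme as the kernel's
 * x86 feature bitmaps and holds the set of features KVM itself supports and
 * can expose to guests; kvm_set_cpu_caps() fills it in when KVM is
 * initialized.
 */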
extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
                                                    u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
               u32 *ecx, u32 *edx, bool exact_only);

u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.maxphyaddr;
}

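/*
 * GPA legality checks: vcpu->arch.reserved_gpa_bits caches the address bits
 * above the guest's MAXPHYADDR, so a GPA is legal iff none of those bits is
 * set (and, for the aligned variants, the GPA also meets the alignment).
 */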
static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
                                                 gpa_t gpa, gpa_t alignment)
{
        return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

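/*
 * Replace one output register of a CPUID entry with KVM's capability word
 * for that leaf, so the entry advertises what KVM supports rather than the
 * raw hardware bits.
 */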
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
                                                 unsigned int leaf)
{
        u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

        BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
        *reg = kvm_cpu_caps[leaf];
}

static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
                                                     unsigned int x86_feature)
{
        const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
        struct kvm_cpuid_entry2 *entry;

        entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
        if (!entry)
                return NULL;

        return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
                                            unsigned int x86_feature)
{
        u32 *reg;

        reg = guest_cpuid_get_register(vcpu, x86_feature);
        if (!reg)
                return false;

        return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
                                              unsigned int x86_feature)
{
        u32 *reg;

        reg = guest_cpuid_get_register(vcpu, x86_feature);
        if (reg)
                *reg &= ~__feature_bit(x86_feature);
}

static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0);
        return best &&
               (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
                is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0);
        return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1);
        if (!best)
                return -1;

        return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1);
        if (!best)
                return -1;

        return x86_model(best->eax);
}

static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
{
        return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1);
        if (!best)
                return -1;

        return x86_stepping(best->eax);
}

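/*
 * The guest may be given MSR_IA32_SPEC_CTRL / MSR_IA32_PRED_CMD not only
 * via the architectural SPEC_CTRL CPUID bit but also via the AMD-specific
 * STIBP/IBRS/SSBD (resp. IBPB/SBPB) bits, any of which implies the MSR
 * exists for the guest.
 */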
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
        return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
                guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
                guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
                guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
        return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
                guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
                guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.msr_misc_features_enables &
                  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

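/*
 * Accessors for kvm_cpu_caps.  reverse_cpuid_check() is a build-time check
 * that the feature's word has a reverse-CPUID mapping;
 * kvm_cpu_cap_check_and_set() propagates a feature into kvm_cpu_caps only
 * if the boot CPU actually has it.
 */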
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
        unsigned int x86_leaf = __feature_leaf(x86_feature);

        reverse_cpuid_check(x86_leaf);
        kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
        unsigned int x86_leaf = __feature_leaf(x86_feature);

        reverse_cpuid_check(x86_leaf);
        kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
        unsigned int x86_leaf = __feature_leaf(x86_feature);

        reverse_cpuid_check(x86_leaf);
        return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
        return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
        if (boot_cpu_has(x86_feature))
                kvm_cpu_cap_set(x86_feature);
}

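/*
 * Paravirt feature check for the KVM_CPUID_FEATURES leaf: unless userspace
 * asked for PV feature enforcement, all PV features are assumed available;
 * otherwise consult the vCPU's cached PV feature mask.
 */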
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
                                         unsigned int kvm_feature)
{
        if (!vcpu->arch.pv_cpuid.enforce)
                return true;

        return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

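/*
 * "Governed" features are tracked in a per-vCPU bitmap so that hot paths
 * can check whether a feature is both supported by KVM and enabled in the
 * guest's CPUID without re-walking the CPUID entries.  The feature list
 * comes from governed_features.h.
 */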
enum kvm_governed_features {
#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
#include "governed_features.h"
        KVM_NR_GOVERNED_FEATURES
};

static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
{
        switch (x86_feature) {
#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
#include "governed_features.h"
        default:
                return -1;
        }
}

static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
{
        return kvm_governed_feature_index(x86_feature) >= 0;
}

static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
                                                     unsigned int x86_feature)
{
        BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

        __set_bit(kvm_governed_feature_index(x86_feature),
                  vcpu->arch.governed_features.enabled);
}

static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
                                                               unsigned int x86_feature)
{
        if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
                kvm_governed_feature_set(vcpu, x86_feature);
}

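/*
 * Query the precomputed governed-feature bitmap; only features listed in
 * governed_features.h may be queried (enforced at build time).
 */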
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
                                          unsigned int x86_feature)
{
        BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

        return test_bit(kvm_governed_feature_index(x86_feature),
                        vcpu->arch.governed_features.enabled);
}

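/*
 * When the guest can use LAM (linear address masking), the LAM_U48/LAM_U57
 * control bits in CR3 are not address bits, so strip them before checking
 * CR3 against the guest's legal physical address range.
 */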
static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (guest_can_use(vcpu, X86_FEATURE_LAM))
                cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);

        return kvm_vcpu_is_legal_gpa(vcpu, cr3);
}

#endif