/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

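/*
 * kvm_cpu_caps mirrors the kernel's cpufeatures word layout for the
 * hardware-defined leaves tracked by reverse_cpuid.h: each u32 holds the
 * feature bits KVM can expose for one CPUID register.  kvm_set_cpu_caps()
 * populates it during vendor module setup from boot_cpu_data plus
 * KVM-specific adjustments; the kvm_cpu_cap_*() helpers below query and
 * modify it.
 */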
extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

void __init kvm_init_xstate_sizes(void);
u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

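/*
 * vcpu->arch.maxphyaddr caches the guest's MAXPHYADDR (CPUID.0x80000008:EAX
 * bits 7:0); cpuid_query_maxphyaddr() recomputes it from the vCPU's CPUID
 * entries when userspace changes them.
 */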
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

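/*
 * reserved_gpa_bits covers the bits at and above the guest's MAXPHYADDR,
 * possibly adjusted by vendor code (e.g. to carve out SEV's C-bit), so a
 * GPA is legal iff it sets none of them.
 */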
static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

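/*
 * Overriding a register relies on the reverse_cpuid mapping: "leaf * 32" is
 * the feature number of bit 0 in capability word "leaf", which
 * cpuid_entry_get_reg() resolves to the register backing that word.
 */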
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 unsigned int leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

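/*
 * Translate an X86_FEATURE_* constant into the guest CPUID register that
 * holds it, via the reverse_cpuid table; returns NULL if userspace did not
 * provide the corresponding CPUID entry.
 */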
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

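/*
 * Note the asymmetry: guest_cpuid_has() reflects what userspace exposed to
 * the guest via KVM_SET_CPUID{,2}, whereas kvm_cpu_cap_has() below reflects
 * what KVM itself supports on this host.  Checks that need both are what
 * the governed-features machinery at the end of this header is for.
 */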
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

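/*
 * vcpu->arch.is_amd_compatible is cached from the guest's vendor string
 * (CPUID leaf 0x0) when userspace sets CPUID; vendor-specific quirks, e.g.
 * in instruction emulation, key off these helpers.
 */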
static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.is_amd_compatible;
}

static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
{
	return !guest_cpuid_is_amd_compatible(vcpu);
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
{
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

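/*
 * MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD are architecturally visible if
 * any one of several Intel or AMD feature bits is enumerated, hence the
 * multi-feature checks below.
 */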
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
		guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

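/*
 * The kvm_cpu_cap_*() helpers operate on KVM's host-side capability words;
 * the set/clear variants are intended for kvm_set_cpu_caps() and vendor
 * setup code, while the get/has variants can be used wherever KVM support
 * for a feature needs checking.
 */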
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

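/*
 * PV feature checks honor KVM_CAP_ENFORCE_PV_FEATURE_CPUID: unless userspace
 * opted in to enforcement, all PV features are treated as available
 * regardless of the guest's KVM_CPUID_FEATURES leaf.
 */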
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

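/*
 * "Governed" features are the subset of CPUID-based features, listed in
 * governed_features.h, that KVM tracks in a per-vCPU bitmap so hot paths can
 * answer "is the feature supported by KVM *and* enabled in guest CPUID?"
 * with a single test_bit() instead of a CPUID entry walk.
 */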
enum kvm_governed_features {
#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
#include "governed_features.h"
	KVM_NR_GOVERNED_FEATURES
};

static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
{
	switch (x86_feature) {
#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
#include "governed_features.h"
	default:
		return -1;
	}
}

static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
{
	return kvm_governed_feature_index(x86_feature) >= 0;
}

static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	__set_bit(kvm_governed_feature_index(x86_feature),
		  vcpu->arch.governed_features.enabled);
}

static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
							       unsigned int x86_feature)
{
	if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
		kvm_governed_feature_set(vcpu, x86_feature);
}

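/*
 * Typical flow, as an illustrative sketch (not code from this file): vendor
 * code populates the bitmap from its vcpu_after_set_cpuid() hook, e.g.
 *
 *	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
 *
 * and run-time paths then use guest_can_use(vcpu, X86_FEATURE_XSAVES).
 */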
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
					  unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	return test_bit(kvm_governed_feature_index(x86_feature),
			vcpu->arch.governed_features.enabled);
}

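/*
 * CR3 can legitimately set the LAM_U48/LAM_U57 control bits above the
 * physical address bits, so strip them before the reserved-GPA check when
 * the guest can use LAM.
 */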
static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (guest_can_use(vcpu, X86_FEATURE_LAM))
		cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);

	return kvm_vcpu_is_legal_gpa(vcpu, cr3);
}

#endif