Loading...
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef ARCH_X86_KVM_CPUID_H
3#define ARCH_X86_KVM_CPUID_H
4
5#include "reverse_cpuid.h"
6#include <asm/cpu.h>
7#include <asm/processor.h>
8#include <uapi/asm/kvm_para.h>
9
10extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
11void kvm_set_cpu_caps(void);
12
13void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
14void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
15struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
16 u32 function, u32 index);
17struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
18 u32 function);
19int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
20 struct kvm_cpuid_entry2 __user *entries,
21 unsigned int type);
22int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
23 struct kvm_cpuid *cpuid,
24 struct kvm_cpuid_entry __user *entries);
25int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
26 struct kvm_cpuid2 *cpuid,
27 struct kvm_cpuid_entry2 __user *entries);
28int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
29 struct kvm_cpuid2 *cpuid,
30 struct kvm_cpuid_entry2 __user *entries);
31bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
32 u32 *ecx, u32 *edx, bool exact_only);
33
34void __init kvm_init_xstate_sizes(void);
35u32 xstate_required_size(u64 xstate_bv, bool compacted);
36
37int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
38u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);
39
40static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
41{
42 return vcpu->arch.maxphyaddr;
43}
44
45static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
46{
47 return !(gpa & vcpu->arch.reserved_gpa_bits);
48}
49
50static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
51 gpa_t gpa, gpa_t alignment)
52{
53 return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
54}
55
56static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
57{
58 return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
59}
60
61static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
62 unsigned int leaf)
63{
64 u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);
65
66 BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
67 *reg = kvm_cpu_caps[leaf];
68}
69
70static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
71 unsigned int x86_feature)
72{
73 const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
74 struct kvm_cpuid_entry2 *entry;
75
76 entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
77 if (!entry)
78 return NULL;
79
80 return __cpuid_entry_get_reg(entry, cpuid.reg);
81}
82
83static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
84 unsigned int x86_feature)
85{
86 u32 *reg;
87
88 reg = guest_cpuid_get_register(vcpu, x86_feature);
89 if (!reg)
90 return false;
91
92 return *reg & __feature_bit(x86_feature);
93}
94
95static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
96 unsigned int x86_feature)
97{
98 u32 *reg;
99
100 reg = guest_cpuid_get_register(vcpu, x86_feature);
101 if (reg)
102 *reg &= ~__feature_bit(x86_feature);
103}
104
105static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
106{
107 return vcpu->arch.is_amd_compatible;
108}
109
110static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
111{
112 return !guest_cpuid_is_amd_compatible(vcpu);
113}
114
115static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
116{
117 struct kvm_cpuid_entry2 *best;
118
119 best = kvm_find_cpuid_entry(vcpu, 0x1);
120 if (!best)
121 return -1;
122
123 return x86_family(best->eax);
124}
125
126static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
127{
128 struct kvm_cpuid_entry2 *best;
129
130 best = kvm_find_cpuid_entry(vcpu, 0x1);
131 if (!best)
132 return -1;
133
134 return x86_model(best->eax);
135}
136
137static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
138{
139 return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
140}
141
142static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
143{
144 struct kvm_cpuid_entry2 *best;
145
146 best = kvm_find_cpuid_entry(vcpu, 0x1);
147 if (!best)
148 return -1;
149
150 return x86_stepping(best->eax);
151}
152
153static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
154{
155 return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
156 guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
157 guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
158 guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
159}
160
161static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
162{
163 return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
164 guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
165 guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
166}
167
168static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
169{
170 return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
171}
172
173static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
174{
175 return vcpu->arch.msr_misc_features_enables &
176 MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
177}
178
179static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
180{
181 unsigned int x86_leaf = __feature_leaf(x86_feature);
182
183 reverse_cpuid_check(x86_leaf);
184 kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
185}
186
187static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
188{
189 unsigned int x86_leaf = __feature_leaf(x86_feature);
190
191 reverse_cpuid_check(x86_leaf);
192 kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
193}
194
195static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
196{
197 unsigned int x86_leaf = __feature_leaf(x86_feature);
198
199 reverse_cpuid_check(x86_leaf);
200 return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
201}
202
203static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
204{
205 return !!kvm_cpu_cap_get(x86_feature);
206}
207
208static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
209{
210 if (boot_cpu_has(x86_feature))
211 kvm_cpu_cap_set(x86_feature);
212}
213
214static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
215 unsigned int kvm_feature)
216{
217 if (!vcpu->arch.pv_cpuid.enforce)
218 return true;
219
220 return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
221}
222
223enum kvm_governed_features {
224#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
225#include "governed_features.h"
226 KVM_NR_GOVERNED_FEATURES
227};
228
229static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
230{
231 switch (x86_feature) {
232#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
233#include "governed_features.h"
234 default:
235 return -1;
236 }
237}
238
239static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
240{
241 return kvm_governed_feature_index(x86_feature) >= 0;
242}
243
244static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
245 unsigned int x86_feature)
246{
247 BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
248
249 __set_bit(kvm_governed_feature_index(x86_feature),
250 vcpu->arch.governed_features.enabled);
251}
252
253static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
254 unsigned int x86_feature)
255{
256 if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
257 kvm_governed_feature_set(vcpu, x86_feature);
258}
259
260static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
261 unsigned int x86_feature)
262{
263 BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
264
265 return test_bit(kvm_governed_feature_index(x86_feature),
266 vcpu->arch.governed_features.enabled);
267}
268
269static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
270{
271 if (guest_can_use(vcpu, X86_FEATURE_LAM))
272 cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
273
274 return kvm_vcpu_is_legal_gpa(vcpu, cr3);
275}
276
277#endif
1#ifndef ARCH_X86_KVM_CPUID_H
2#define ARCH_X86_KVM_CPUID_H
3
4#include "x86.h"
5#include <asm/cpu.h>
6
7int kvm_update_cpuid(struct kvm_vcpu *vcpu);
8bool kvm_mpx_supported(void);
9struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
10 u32 function, u32 index);
11int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
12 struct kvm_cpuid_entry2 __user *entries,
13 unsigned int type);
14int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
15 struct kvm_cpuid *cpuid,
16 struct kvm_cpuid_entry __user *entries);
17int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18 struct kvm_cpuid2 *cpuid,
19 struct kvm_cpuid_entry2 __user *entries);
20int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21 struct kvm_cpuid2 *cpuid,
22 struct kvm_cpuid_entry2 __user *entries);
23void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
24
25int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
26
27static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
28{
29 return vcpu->arch.maxphyaddr;
30}
31
32static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
33{
34 struct kvm_cpuid_entry2 *best;
35
36 if (!static_cpu_has(X86_FEATURE_XSAVE))
37 return false;
38
39 best = kvm_find_cpuid_entry(vcpu, 1, 0);
40 return best && (best->ecx & bit(X86_FEATURE_XSAVE));
41}
42
43static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
44{
45 struct kvm_cpuid_entry2 *best;
46
47 best = kvm_find_cpuid_entry(vcpu, 1, 0);
48 return best && (best->edx & bit(X86_FEATURE_MTRR));
49}
50
51static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
52{
53 struct kvm_cpuid_entry2 *best;
54
55 best = kvm_find_cpuid_entry(vcpu, 7, 0);
56 return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
57}
58
59static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
60{
61 struct kvm_cpuid_entry2 *best;
62
63 best = kvm_find_cpuid_entry(vcpu, 7, 0);
64 return best && (best->ebx & bit(X86_FEATURE_SMEP));
65}
66
67static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
68{
69 struct kvm_cpuid_entry2 *best;
70
71 best = kvm_find_cpuid_entry(vcpu, 7, 0);
72 return best && (best->ebx & bit(X86_FEATURE_SMAP));
73}
74
75static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
76{
77 struct kvm_cpuid_entry2 *best;
78
79 best = kvm_find_cpuid_entry(vcpu, 7, 0);
80 return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
81}
82
83static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu)
84{
85 struct kvm_cpuid_entry2 *best;
86
87 best = kvm_find_cpuid_entry(vcpu, 7, 0);
88 return best && (best->ecx & bit(X86_FEATURE_PKU));
89}
90
91static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
92{
93 struct kvm_cpuid_entry2 *best;
94
95 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
96 return best && (best->edx & bit(X86_FEATURE_LM));
97}
98
99static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
100{
101 struct kvm_cpuid_entry2 *best;
102
103 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
104 return best && (best->ecx & bit(X86_FEATURE_OSVW));
105}
106
107static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
108{
109 struct kvm_cpuid_entry2 *best;
110
111 best = kvm_find_cpuid_entry(vcpu, 1, 0);
112 return best && (best->ecx & bit(X86_FEATURE_PCID));
113}
114
115static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
116{
117 struct kvm_cpuid_entry2 *best;
118
119 best = kvm_find_cpuid_entry(vcpu, 1, 0);
120 return best && (best->ecx & bit(X86_FEATURE_X2APIC));
121}
122
123static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
124{
125 struct kvm_cpuid_entry2 *best;
126
127 best = kvm_find_cpuid_entry(vcpu, 0, 0);
128 return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
129}
130
131static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
132{
133 struct kvm_cpuid_entry2 *best;
134
135 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
136 return best && (best->edx & bit(X86_FEATURE_GBPAGES));
137}
138
139static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
140{
141 struct kvm_cpuid_entry2 *best;
142
143 best = kvm_find_cpuid_entry(vcpu, 7, 0);
144 return best && (best->ebx & bit(X86_FEATURE_RTM));
145}
146
147static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
148{
149 struct kvm_cpuid_entry2 *best;
150
151 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
152 return best && (best->edx & bit(X86_FEATURE_RDTSCP));
153}
154
155/*
156 * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
157 */
158#define BIT_NRIPS 3
159
160static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
161{
162 struct kvm_cpuid_entry2 *best;
163
164 best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);
165
166 /*
167 * NRIPS is a scattered cpuid feature, so we can't use
168 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
169 * position 8, not 3).
170 */
171 return best && (best->edx & bit(BIT_NRIPS));
172}
173#undef BIT_NRIPS
174
175static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
176{
177 struct kvm_cpuid_entry2 *best;
178
179 best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
180 if (!best)
181 return -1;
182
183 return x86_family(best->eax);
184}
185
186static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
187{
188 struct kvm_cpuid_entry2 *best;
189
190 best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
191 if (!best)
192 return -1;
193
194 return x86_model(best->eax);
195}
196
197static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
198{
199 struct kvm_cpuid_entry2 *best;
200
201 best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
202 if (!best)
203 return -1;
204
205 return x86_stepping(best->eax);
206}
207
208#endif