arch/x86/kvm/cpuid.h (v6.13.7)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

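/*
 * kvm_cpu_caps holds, per reverse-CPUID leaf, the feature bits KVM is able
 * to expose to guests; kvm_set_cpu_caps() fills it from host capabilities
 * during vendor module setup.
 */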
extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

void __init kvm_init_xstate_sizes(void);
u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

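/*
 * Replace one register of a CPUID entry with KVM's capability mask for the
 * given reverse-CPUID leaf (leaf * 32 converts a leaf number to its first
 * feature-bit number).
 */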
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 unsigned int leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

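/*
 * Find the CPUID register that carries @x86_feature in the guest's CPUID
 * entries, using the reverse-CPUID mapping from feature word to
 * (function, index, register).  Returns NULL if the guest lacks the leaf.
 */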
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.is_amd_compatible;
}

static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
{
	return !guest_cpuid_is_amd_compatible(vcpu);
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
{
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
		guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

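/*
 * Accessors for kvm_cpu_caps.  These operate on KVM's own capabilities,
 * i.e. what KVM can offer, as opposed to guest_cpuid_has(), which checks
 * what userspace actually enumerated to the guest.
 */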
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

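/*
 * PV (KVM-specific) feature check.  Unless userspace opted in via
 * KVM_CAP_ENFORCE_PV_FEATURE_CPUID, all PV features are treated as enabled.
 */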
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

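/*
 * "Governed" features are a small set of features whose effective state
 * (supported by KVM && enabled in guest CPUID) is cached in a per-vCPU
 * bitmap so that hot paths can test a single bit via guest_can_use()
 * instead of walking guest CPUID.
 */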
enum kvm_governed_features {
#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
#include "governed_features.h"
	KVM_NR_GOVERNED_FEATURES
};

static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
{
	switch (x86_feature) {
#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
#include "governed_features.h"
	default:
		return -1;
	}
}

static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
{
	return kvm_governed_feature_index(x86_feature) >= 0;
}

static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	__set_bit(kvm_governed_feature_index(x86_feature),
		  vcpu->arch.governed_features.enabled);
}

static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
							       unsigned int x86_feature)
{
	if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
		kvm_governed_feature_set(vcpu, x86_feature);
}

static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
					  unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	return test_bit(kvm_governed_feature_index(x86_feature),
			vcpu->arch.governed_features.enabled);
}

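/*
 * With Linear Address Masking (LAM), CR3 bits 61 (LAM_U57) and 62 (LAM_U48)
 * are control bits, not address bits, so strip them before the reserved-GPA
 * check.
 */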
static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (guest_can_use(vcpu, X86_FEATURE_LAM))
		cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);

	return kvm_vcpu_is_legal_gpa(vcpu, cr3);
}

#endif
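For orientation, a minimal sketch of how these helpers are typically combined; the hook name vendor_vcpu_after_set_cpuid() is illustrative only, not a kernel symbol:

/* Hypothetical vendor hook, run after userspace sets guest CPUID. */
static void vendor_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	/* Cache the governed LAM bit so hot paths avoid CPUID walks. */
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM);

	/* Gate MSR emulation on what the guest was actually given. */
	if (!guest_has_spec_ctrl_msr(vcpu))
		return; /* e.g., refuse IA32_SPEC_CTRL accesses */
}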
arch/x86/kvm/cpuid.h (v6.9.4)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 unsigned int leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

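/*
 * Vendor checks against the guest's leaf 0 vendor string.  As the v6.13.7
 * copy above shows, later kernels dropped these string-based helpers in
 * favor of the cached vcpu->arch.is_amd_compatible flag.
 */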
static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0);
	return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}

static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.is_amd_compatible;
}

static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
{
	return !guest_cpuid_is_amd_compatible(vcpu);
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
{
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
		guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

enum kvm_governed_features {
#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
#include "governed_features.h"
	KVM_NR_GOVERNED_FEATURES
};

static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
{
	switch (x86_feature) {
#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
#include "governed_features.h"
	default:
		return -1;
	}
}

static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
{
	return kvm_governed_feature_index(x86_feature) >= 0;
}

static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	__set_bit(kvm_governed_feature_index(x86_feature),
		  vcpu->arch.governed_features.enabled);
}

static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
							       unsigned int x86_feature)
{
	if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
		kvm_governed_feature_set(vcpu, x86_feature);
}

static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
					  unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	return test_bit(kvm_governed_feature_index(x86_feature),
			vcpu->arch.governed_features.enabled);
}

static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (guest_can_use(vcpu, X86_FEATURE_LAM))
		cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);

	return kvm_vcpu_is_legal_gpa(vcpu, cr3);
}

#endif