1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 * cpuid support routines
5 *
6 * derived from arch/x86/kvm/x86.c
7 *
8 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
9 * Copyright IBM Corporation, 2008
10 */
11
12#include <linux/kvm_host.h>
13#include <linux/export.h>
14#include <linux/vmalloc.h>
15#include <linux/uaccess.h>
16#include <linux/sched/stat.h>
17
18#include <asm/processor.h>
19#include <asm/user.h>
20#include <asm/fpu/xstate.h>
21#include <asm/sgx.h>
22#include <asm/cpuid.h>
23#include "cpuid.h"
24#include "lapic.h"
25#include "mmu.h"
26#include "trace.h"
27#include "pmu.h"
28
29/*
30 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
31 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
32 */
33u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
34EXPORT_SYMBOL_GPL(kvm_cpu_caps);
35
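/*
 * Compute the XSAVE buffer size needed to hold the user xfeatures in
 * @xstate_bv.  In the standard format the size is dictated by the highest
 * enabled feature's offset+size; in the compacted format enabled areas are
 * packed back to back, with 64-byte alignment when ECX[1] is set for the
 * component.
 */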
36u32 xstate_required_size(u64 xstate_bv, bool compacted)
37{
38 int feature_bit = 0;
39 u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
40
41 xstate_bv &= XFEATURE_MASK_EXTEND;
42 while (xstate_bv) {
43 if (xstate_bv & 0x1) {
44 u32 eax, ebx, ecx, edx, offset;
45 cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
46 /* ECX[1]: 64B alignment in compacted form */
47 if (compacted)
48 offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
49 else
50 offset = ebx;
51 ret = max(ret, offset + eax);
52 }
53
54 xstate_bv >>= 1;
55 feature_bit++;
56 }
57
58 return ret;
59}
60
61/*
62 * This one is tied to SSB in the user API, and not
63 * visible in /proc/cpuinfo.
64 */
65#define KVM_X86_FEATURE_AMD_PSFD (13*32+28) /* Predictive Store Forwarding Disable */
66
67#define F feature_bit
68
69/* Scattered Flag - For features that are scattered by cpufeatures.h. */
70#define SF(name) \
71({ \
72 BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \
73 (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0); \
74})
75
76/*
77 * Magic value used by KVM when querying userspace-provided CPUID entries and
78 * doesn't care about the CPUID index because the index of the function in
79 * question is not significant. Note, this magic value must have at least one
80 * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find()
81 * to avoid false positives when processing guest CPUID input.
82 */
83#define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
84
85static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
86 struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
87{
88 struct kvm_cpuid_entry2 *e;
89 int i;
90
91 for (i = 0; i < nent; i++) {
92 e = &entries[i];
93
94 if (e->function != function)
95 continue;
96
97 /*
98 * If the index isn't significant, use the first entry with a
99 * matching function. It's userspace's responsibility to not
100 * provide "duplicate" entries in all cases.
101 */
102 if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
103 return e;
104
105
106 /*
107 * Similarly, use the first matching entry if KVM is doing a
108 * lookup (as opposed to emulating CPUID) for a function that's
109 * architecturally defined as not having a significant index.
110 */
111 if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) {
112 /*
113 * Direct lookups from KVM should not diverge from what
114 * KVM defines internally (the architectural behavior).
115 */
116 WARN_ON_ONCE(cpuid_function_is_indexed(function));
117 return e;
118 }
119 }
120
121 return NULL;
122}
123
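/*
 * Validate userspace-provided CPUID data: reject virtual address widths that
 * the canonical address checks can't handle, and expand the guest FPU state
 * if the entries expose dynamically enabled xfeatures, e.g. AMX tile data.
 */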
124static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
125 struct kvm_cpuid_entry2 *entries,
126 int nent)
127{
128 struct kvm_cpuid_entry2 *best;
129 u64 xfeatures;
130
131 /*
132 * The existing code assumes the virtual address is 48-bit or 57-bit in the
133 * canonical address checks; exit if it is ever changed.
134 */
135 best = cpuid_entry2_find(entries, nent, 0x80000008,
136 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
137 if (best) {
138 int vaddr_bits = (best->eax & 0xff00) >> 8;
139
140 if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
141 return -EINVAL;
142 }
143
144 /*
145 * Exposing dynamic xfeatures to the guest requires additional
146 * enabling in the FPU, e.g. to expand the guest XSAVE state size.
147 */
148 best = cpuid_entry2_find(entries, nent, 0xd, 0);
149 if (!best)
150 return 0;
151
152 xfeatures = best->eax | ((u64)best->edx << 32);
153 xfeatures &= XFEATURE_MASK_USER_DYNAMIC;
154 if (!xfeatures)
155 return 0;
156
157 return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
158}
159
160/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
161static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
162 int nent)
163{
164 struct kvm_cpuid_entry2 *orig;
165 int i;
166
167 if (nent != vcpu->arch.cpuid_nent)
168 return -EINVAL;
169
170 for (i = 0; i < nent; i++) {
171 orig = &vcpu->arch.cpuid_entries[i];
172 if (e2[i].function != orig->function ||
173 e2[i].index != orig->index ||
174 e2[i].flags != orig->flags ||
175 e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
176 e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
177 return -EINVAL;
178 }
179
180 return 0;
181}
182
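/*
 * Locate KVM's paravirt CPUID leaves by scanning the possible hypervisor
 * bases (0x40000000, 0x40000100, ...) for the KVM signature, so that the
 * leaves are found even when KVM isn't the first advertised hypervisor.
 */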
183static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu)
184{
185 u32 function;
186 struct kvm_cpuid_entry2 *entry;
187
188 vcpu->arch.kvm_cpuid_base = 0;
189
190 for_each_possible_hypervisor_cpuid_base(function) {
191 entry = kvm_find_cpuid_entry(vcpu, function);
192
193 if (entry) {
194 u32 signature[3];
195
196 signature[0] = entry->ebx;
197 signature[1] = entry->ecx;
198 signature[2] = entry->edx;
199
200 BUILD_BUG_ON(sizeof(signature) > sizeof(KVM_SIGNATURE));
201 if (!memcmp(signature, KVM_SIGNATURE, sizeof(signature))) {
202 vcpu->arch.kvm_cpuid_base = function;
203 break;
204 }
205 }
206 }
207}
208
209static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
210 struct kvm_cpuid_entry2 *entries, int nent)
211{
212 u32 base = vcpu->arch.kvm_cpuid_base;
213
214 if (!base)
215 return NULL;
216
217 return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES,
218 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
219}
220
221static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
222{
223 return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
224 vcpu->arch.cpuid_nent);
225}
226
227void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
228{
229 struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);
230
231 /*
232	 * Save the feature bitmap to avoid a CPUID lookup for every PV
233	 * operation.
234 */
235 if (best)
236 vcpu->arch.pv_cpuid.features = best->eax;
237}
238
239/*
240 * Calculate guest's supported XCR0 taking into account guest CPUID data and
241 * KVM's supported XCR0 (comprised of host's XCR0 and KVM_SUPPORTED_XCR0).
242 */
243static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
244{
245 struct kvm_cpuid_entry2 *best;
246
247 best = cpuid_entry2_find(entries, nent, 0xd, 0);
248 if (!best)
249 return 0;
250
251 return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
252}
253
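/*
 * Refresh the CPUID bits that mirror dynamic guest state: CR4.OSXSAVE and
 * CR4.PKE, the APIC enable bit, the XSAVE sizes implied by the current XCR0,
 * MWAIT visibility, PV_UNHALT, and the SGX allowed-XFRM mask.
 */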
254static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
255 int nent)
256{
257 struct kvm_cpuid_entry2 *best;
258 u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent);
259
260 best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
261 if (best) {
262 /* Update OSXSAVE bit */
263 if (boot_cpu_has(X86_FEATURE_XSAVE))
264 cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
265 kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));
266
267 cpuid_entry_change(best, X86_FEATURE_APIC,
268 vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
269 }
270
271 best = cpuid_entry2_find(entries, nent, 7, 0);
272 if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
273 cpuid_entry_change(best, X86_FEATURE_OSPKE,
274 kvm_read_cr4_bits(vcpu, X86_CR4_PKE));
275
276 best = cpuid_entry2_find(entries, nent, 0xD, 0);
277 if (best)
278 best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
279
280 best = cpuid_entry2_find(entries, nent, 0xD, 1);
281 if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
282 cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
283 best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
284
285 best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
286 if (kvm_hlt_in_guest(vcpu->kvm) && best &&
287 (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
288 best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
289
290 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
291 best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
292 if (best)
293 cpuid_entry_change(best, X86_FEATURE_MWAIT,
294 vcpu->arch.ia32_misc_enable_msr &
295 MSR_IA32_MISC_ENABLE_MWAIT);
296 }
297
298 /*
299 * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
300 * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
301 * requested XCR0 value. The enclave's XFRM must be a subset of XCR0
302 * at the time of EENTER, thus adjust the allowed XFRM by the guest's
303 * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to
304 * '1' even on CPUs that don't support XSAVE.
305 */
306 best = cpuid_entry2_find(entries, nent, 0x12, 0x1);
307 if (best) {
308 best->ecx &= guest_supported_xcr0 & 0xffffffff;
309 best->edx &= guest_supported_xcr0 >> 32;
310 best->ecx |= XFEATURE_MASK_FPSSE;
311 }
312}
313
314void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
315{
316 __kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
317}
318EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
319
320static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
321{
322 struct kvm_cpuid_entry2 *entry;
323
324 entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
325 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
326 return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
327}
328
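/*
 * Recompute all vCPU state that is derived from guest CPUID: LAPIC timer
 * modes and version, supported XCR0, MAXPHYADDR and reserved bits, PMU and
 * Hyper-V state, and finally any vendor- and MMU-specific bookkeeping.
 */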
329static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
330{
331 struct kvm_lapic *apic = vcpu->arch.apic;
332 struct kvm_cpuid_entry2 *best;
333
334 best = kvm_find_cpuid_entry(vcpu, 1);
335 if (best && apic) {
336 if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
337 apic->lapic_timer.timer_mode_mask = 3 << 17;
338 else
339 apic->lapic_timer.timer_mode_mask = 1 << 17;
340
341 kvm_apic_set_version(vcpu);
342 }
343
344 vcpu->arch.guest_supported_xcr0 =
345 cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
346
347 /*
348 * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
349 * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
350 * supported by the host.
351 */
352 vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
353 XFEATURE_MASK_FPSSE;
354
355 kvm_update_pv_runtime(vcpu);
356
357 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
358 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
359
360 kvm_pmu_refresh(vcpu);
361 vcpu->arch.cr4_guest_rsvd_bits =
362 __cr4_reserved_bits(guest_cpuid_has, vcpu);
363
364 kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
365 vcpu->arch.cpuid_nent));
366
367 /* Invoke the vendor callback only after the above state is updated. */
368 static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
369
370 /*
371	 * Refresh the MMU last, as it needs to consume any vendor-specific
372	 * adjustments to the reserved GPA bits.
373 */
374 kvm_mmu_after_set_cpuid(vcpu);
375}
376
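/* Query guest MAXPHYADDR from CPUID.0x80000008, defaulting to 36 bits. */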
377int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
378{
379 struct kvm_cpuid_entry2 *best;
380
381 best = kvm_find_cpuid_entry(vcpu, 0x80000000);
382 if (!best || best->eax < 0x80000008)
383 goto not_found;
384 best = kvm_find_cpuid_entry(vcpu, 0x80000008);
385 if (best)
386 return best->eax & 0xff;
387not_found:
388 return 36;
389}
390
391/*
392 * This "raw" version returns the reserved GPA bits without any adjustments for
393 * encryption technologies that usurp bits. The raw mask should be used if and
394 * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
395 */
396u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
397{
398 return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
399}
400
401static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
402 int nent)
403{
404 int r;
405
406 __kvm_update_cpuid_runtime(vcpu, e2, nent);
407
408 /*
409 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
410 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
411 * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
412 * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
413 * the core vCPU model on the fly. It would've been better to forbid any
414 * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately
415 * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do
416 * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
417 * whether the supplied CPUID data is equal to what's already set.
418 */
419 if (vcpu->arch.last_vmentry_cpu != -1) {
420 r = kvm_cpuid_check_equal(vcpu, e2, nent);
421 if (r)
422 return r;
423
424 kvfree(e2);
425 return 0;
426 }
427
428 if (kvm_cpuid_has_hyperv(e2, nent)) {
429 r = kvm_hv_vcpu_init(vcpu);
430 if (r)
431 return r;
432 }
433
434 r = kvm_check_cpuid(vcpu, e2, nent);
435 if (r)
436 return r;
437
438 kvfree(vcpu->arch.cpuid_entries);
439 vcpu->arch.cpuid_entries = e2;
440 vcpu->arch.cpuid_nent = nent;
441
442 kvm_update_kvm_cpuid_base(vcpu);
443 kvm_vcpu_after_set_cpuid(vcpu);
444
445 return 0;
446}
447
448/* Legacy path: an old userspace process fills a new kernel module with the original kvm_cpuid_entry format. */
449int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
450 struct kvm_cpuid *cpuid,
451 struct kvm_cpuid_entry __user *entries)
452{
453 int r, i;
454 struct kvm_cpuid_entry *e = NULL;
455 struct kvm_cpuid_entry2 *e2 = NULL;
456
457 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
458 return -E2BIG;
459
460 if (cpuid->nent) {
461 e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent));
462 if (IS_ERR(e))
463 return PTR_ERR(e);
464
465 e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
466 if (!e2) {
467 r = -ENOMEM;
468 goto out_free_cpuid;
469 }
470 }
471 for (i = 0; i < cpuid->nent; i++) {
472 e2[i].function = e[i].function;
473 e2[i].eax = e[i].eax;
474 e2[i].ebx = e[i].ebx;
475 e2[i].ecx = e[i].ecx;
476 e2[i].edx = e[i].edx;
477 e2[i].index = 0;
478 e2[i].flags = 0;
479 e2[i].padding[0] = 0;
480 e2[i].padding[1] = 0;
481 e2[i].padding[2] = 0;
482 }
483
484 r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
485 if (r)
486 kvfree(e2);
487
488out_free_cpuid:
489 kvfree(e);
490
491 return r;
492}
493
494int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
495 struct kvm_cpuid2 *cpuid,
496 struct kvm_cpuid_entry2 __user *entries)
497{
498 struct kvm_cpuid_entry2 *e2 = NULL;
499 int r;
500
501 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
502 return -E2BIG;
503
504 if (cpuid->nent) {
505 e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent));
506 if (IS_ERR(e2))
507 return PTR_ERR(e2);
508 }
509
510 r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
511 if (r)
512 kvfree(e2);
513
514 return r;
515}
516
517int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
518 struct kvm_cpuid2 *cpuid,
519 struct kvm_cpuid_entry2 __user *entries)
520{
521 int r;
522
523 r = -E2BIG;
524 if (cpuid->nent < vcpu->arch.cpuid_nent)
525 goto out;
526 r = -EFAULT;
527 if (copy_to_user(entries, vcpu->arch.cpuid_entries,
528 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
529 goto out;
530 return 0;
531
532out:
533 cpuid->nent = vcpu->arch.cpuid_nent;
534 return r;
535}
536
537/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
538static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
539{
540 const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
541 struct kvm_cpuid_entry2 entry;
542
543 reverse_cpuid_check(leaf);
544
545 cpuid_count(cpuid.function, cpuid.index,
546 &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
547
548 kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
549}
550
551static __always_inline
552void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
553{
554 /* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */
555 BUILD_BUG_ON(leaf < NCAPINTS);
556
557 kvm_cpu_caps[leaf] = mask;
558
559 __kvm_cpu_cap_mask(leaf);
560}
561
562static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
563{
564 /* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */
565 BUILD_BUG_ON(leaf >= NCAPINTS);
566
567 kvm_cpu_caps[leaf] &= mask;
568
569 __kvm_cpu_cap_mask(leaf);
570}
571
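/*
 * Build kvm_cpu_caps: seed the hardware-defined words from the host's
 * boot_cpu_data, then AND each leaf with both KVM's supported mask and raw
 * host CPUID so that only features KVM can expose (or emulate) remain.
 */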
572void kvm_set_cpu_caps(void)
573{
574#ifdef CONFIG_X86_64
575 unsigned int f_gbpages = F(GBPAGES);
576 unsigned int f_lm = F(LM);
577 unsigned int f_xfd = F(XFD);
578#else
579 unsigned int f_gbpages = 0;
580 unsigned int f_lm = 0;
581 unsigned int f_xfd = 0;
582#endif
583 memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));
584
585 BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
586 sizeof(boot_cpu_data.x86_capability));
587
588 memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
589 sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));
590
591 kvm_cpu_cap_mask(CPUID_1_ECX,
592 /*
593 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
594 * advertised to guests via CPUID!
595 */
596 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
597 0 /* DS-CPL, VMX, SMX, EST */ |
598 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
599 F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
600 F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
601 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
602 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
603 F(F16C) | F(RDRAND)
604 );
605 /* KVM emulates x2apic in software irrespective of host support. */
606 kvm_cpu_cap_set(X86_FEATURE_X2APIC);
607
608 kvm_cpu_cap_mask(CPUID_1_EDX,
609 F(FPU) | F(VME) | F(DE) | F(PSE) |
610 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
611 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
612 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
613 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
614 0 /* Reserved, DS, ACPI */ | F(MMX) |
615 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
616 0 /* HTT, TM, Reserved, PBE */
617 );
618
619 kvm_cpu_cap_mask(CPUID_7_0_EBX,
620 F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
621 F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
622 F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) |
623 F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
624 F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) |
625 F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
626 F(AVX512VL));
627
628 kvm_cpu_cap_mask(CPUID_7_ECX,
629 F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
630 F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
631 F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
632 F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ |
633 F(SGX_LC) | F(BUS_LOCK_DETECT)
634 );
635 /* Set LA57 based on hardware capability. */
636 if (cpuid_ecx(7) & F(LA57))
637 kvm_cpu_cap_set(X86_FEATURE_LA57);
638
639 /*
640 * PKU not yet implemented for shadow paging and requires OSPKE
641 * to be set on the host. Clear it if that is not the case
642 */
643 if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
644 kvm_cpu_cap_clear(X86_FEATURE_PKU);
645
646 kvm_cpu_cap_mask(CPUID_7_EDX,
647 F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
648 F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
649 F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
650 F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
651 F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16)
652 );
653
654 /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
655 kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
656 kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);
657
658 if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
659 kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
660 if (boot_cpu_has(X86_FEATURE_STIBP))
661 kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
662 if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
663 kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);
664
665 kvm_cpu_cap_mask(CPUID_7_1_EAX,
666 F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) | F(AMX_FP16) |
667 F(AVX_IFMA)
668 );
669
670 kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
671 F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI)
672 );
673
674 kvm_cpu_cap_mask(CPUID_D_1_EAX,
675 F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
676 );
677
678 kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
679 SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
680 );
681
682 kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
683 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
684 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
685 F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
686 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
687 F(TOPOEXT) | 0 /* PERFCTR_CORE */
688 );
689
690 kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
691 F(FPU) | F(VME) | F(DE) | F(PSE) |
692 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
693 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
694 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
695 F(PAT) | F(PSE36) | 0 /* Reserved */ |
696 F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
697 F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
698 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
699 );
700
701 if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
702 kvm_cpu_cap_set(X86_FEATURE_GBPAGES);
703
704 kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
705 F(CLZERO) | F(XSAVEERPTR) |
706 F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
707 F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
708 __feature_bit(KVM_X86_FEATURE_AMD_PSFD)
709 );
710
711 /*
712 * AMD has separate bits for each SPEC_CTRL bit.
713 * arch/x86/kernel/cpu/bugs.c is kind enough to
714 * record that in cpufeatures, so use them.
715 */
716 if (boot_cpu_has(X86_FEATURE_IBPB))
717 kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
718 if (boot_cpu_has(X86_FEATURE_IBRS))
719 kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
720 if (boot_cpu_has(X86_FEATURE_STIBP))
721 kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
722 if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
723 kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
724 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
725 kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
726 /*
727 * The preference is to use the SPEC_CTRL MSR instead of the
728 * VIRT_SPEC MSR.
729 */
730 if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
731 !boot_cpu_has(X86_FEATURE_AMD_SSBD))
732 kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
733
734 /*
735 * Hide all SVM features by default; SVM will set the cap bits for
736 * features it emulates and/or exposes for L1.
737 */
738 kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
739
740 kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
741 0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
742 F(SME_COHERENT));
743
744 kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
745 F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
746 F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
747 F(PMM) | F(PMM_EN)
748 );
749
750 /*
751 * Hide RDTSCP and RDPID if either feature is reported as supported but
752 * probing MSR_TSC_AUX failed. This is purely a sanity check and
753 * should never happen, but the guest will likely crash if RDTSCP or
754 * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
755 * the past. For example, the sanity check may fire if this instance of
756 * KVM is running as L1 on top of an older, broken KVM.
757 */
758 if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
759 kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
760 !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
761 kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
762 kvm_cpu_cap_clear(X86_FEATURE_RDPID);
763 }
764}
765EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
766
767struct kvm_cpuid_array {
768 struct kvm_cpuid_entry2 *entries;
769 int maxnent;
770 int nent;
771};
772
773static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
774{
775 if (array->nent >= array->maxnent)
776 return NULL;
777
778 return &array->entries[array->nent++];
779}
780
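/*
 * Grab the next output entry and fill it from raw host CPUID, special-casing
 * hypervisor ranges (always synthesized by the caller) and out-of-bounds
 * 0x80000000-class functions, and flag entries with a significant index.
 */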
781static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
782 u32 function, u32 index)
783{
784 struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);
785
786 if (!entry)
787 return NULL;
788
789 memset(entry, 0, sizeof(*entry));
790 entry->function = function;
791 entry->index = index;
792 switch (function & 0xC0000000) {
793 case 0x40000000:
794 /* Hypervisor leaves are always synthesized by __do_cpuid_func. */
795 return entry;
796
797 case 0x80000000:
798 /*
799 * 0x80000021 is sometimes synthesized by __do_cpuid_func, which
800 * would result in out-of-bounds calls to do_host_cpuid.
801 */
802 {
803 static int max_cpuid_80000000;
804 if (!READ_ONCE(max_cpuid_80000000))
805 WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000));
806 if (function > READ_ONCE(max_cpuid_80000000))
807 return entry;
808 }
809 break;
810
811 default:
812 break;
813 }
814
815 cpuid_count(entry->function, entry->index,
816 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
817
818 if (cpuid_function_is_indexed(function))
819 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
820
821 return entry;
822}
823
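/*
 * KVM_GET_EMULATED_CPUID: enumerate features that KVM emulates in software
 * even when the host lacks them, e.g. MOVBE and RDPID.
 */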
824static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
825{
826 struct kvm_cpuid_entry2 *entry;
827
828 if (array->nent >= array->maxnent)
829 return -E2BIG;
830
831 entry = &array->entries[array->nent];
832 entry->function = func;
833 entry->index = 0;
834 entry->flags = 0;
835
836 switch (func) {
837 case 0:
838 entry->eax = 7;
839 ++array->nent;
840 break;
841 case 1:
842 entry->ecx = F(MOVBE);
843 ++array->nent;
844 break;
845 case 7:
846 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
847 entry->eax = 0;
848 if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
849 entry->ecx = F(RDPID);
850 ++array->nent;
851 break;
852 default:
853 break;
854 }
855
856 return 0;
857}
858
859static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
860{
861 struct kvm_cpuid_entry2 *entry;
862 int r, i, max_idx;
863
864 /* all calls to cpuid_count() should be made on the same cpu */
865 get_cpu();
866
867 r = -E2BIG;
868
869 entry = do_host_cpuid(array, function, 0);
870 if (!entry)
871 goto out;
872
873 switch (function) {
874 case 0:
875 /* Limited to the highest leaf implemented in KVM. */
876 entry->eax = min(entry->eax, 0x1fU);
877 break;
878 case 1:
879 cpuid_entry_override(entry, CPUID_1_EDX);
880 cpuid_entry_override(entry, CPUID_1_ECX);
881 break;
882 case 2:
883 /*
884 * On ancient CPUs, function 2 entries are STATEFUL. That is,
885 * CPUID(function=2, index=0) may return different results each
886 * time, with the least-significant byte in EAX enumerating the
887 * number of times software should do CPUID(2, 0).
888 *
889 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
890 * idiotic. Intel's SDM states that EAX & 0xff "will always
891 * return 01H. Software should ignore this value and not
892 * interpret it as an informational descriptor", while AMD's
893 * APM states that CPUID(2) is reserved.
894 *
895 * WARN if a Frankenstein CPU that supports virtualization and
896 * a stateful CPUID.0x2 is encountered.
897 */
898 WARN_ON_ONCE((entry->eax & 0xff) > 1);
899 break;
900	/* functions 4 and 0x8000001d have an additional index. */
901 case 4:
902 case 0x8000001d:
903 /*
904 * Read entries until the cache type in the previous entry is
905 * zero, i.e. indicates an invalid entry.
906 */
907 for (i = 1; entry->eax & 0x1f; ++i) {
908 entry = do_host_cpuid(array, function, i);
909 if (!entry)
910 goto out;
911 }
912 break;
913 case 6: /* Thermal management */
914 entry->eax = 0x4; /* allow ARAT */
915 entry->ebx = 0;
916 entry->ecx = 0;
917 entry->edx = 0;
918 break;
919	/* function 7 has an additional index. */
920 case 7:
921 entry->eax = min(entry->eax, 1u);
922 cpuid_entry_override(entry, CPUID_7_0_EBX);
923 cpuid_entry_override(entry, CPUID_7_ECX);
924 cpuid_entry_override(entry, CPUID_7_EDX);
925
926 /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
927 if (entry->eax == 1) {
928 entry = do_host_cpuid(array, function, 1);
929 if (!entry)
930 goto out;
931
932 cpuid_entry_override(entry, CPUID_7_1_EAX);
933 cpuid_entry_override(entry, CPUID_7_1_EDX);
934 entry->ebx = 0;
935 entry->ecx = 0;
936 }
937 break;
938 case 0xa: { /* Architectural Performance Monitoring */
939 union cpuid10_eax eax;
940 union cpuid10_edx edx;
941
942 if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
943 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
944 break;
945 }
946
947 eax.split.version_id = kvm_pmu_cap.version;
948 eax.split.num_counters = kvm_pmu_cap.num_counters_gp;
949 eax.split.bit_width = kvm_pmu_cap.bit_width_gp;
950 eax.split.mask_length = kvm_pmu_cap.events_mask_len;
951 edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed;
952 edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed;
953
954 if (kvm_pmu_cap.version)
955 edx.split.anythread_deprecated = 1;
956 edx.split.reserved1 = 0;
957 edx.split.reserved2 = 0;
958
959 entry->eax = eax.full;
960 entry->ebx = kvm_pmu_cap.events_mask;
961 entry->ecx = 0;
962 entry->edx = edx.full;
963 break;
964 }
965 case 0x1f:
966 case 0xb:
967 /*
968 * No topology; a valid topology is indicated by the presence
969 * of subleaf 1.
970 */
971 entry->eax = entry->ebx = entry->ecx = 0;
972 break;
973 case 0xd: {
974 u64 permitted_xcr0 = kvm_caps.supported_xcr0 & xstate_get_guest_group_perm();
975 u64 permitted_xss = kvm_caps.supported_xss;
976
977 entry->eax &= permitted_xcr0;
978 entry->ebx = xstate_required_size(permitted_xcr0, false);
979 entry->ecx = entry->ebx;
980 entry->edx &= permitted_xcr0 >> 32;
981 if (!permitted_xcr0)
982 break;
983
984 entry = do_host_cpuid(array, function, 1);
985 if (!entry)
986 goto out;
987
988 cpuid_entry_override(entry, CPUID_D_1_EAX);
989 if (entry->eax & (F(XSAVES)|F(XSAVEC)))
990 entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
991 true);
992 else {
993 WARN_ON_ONCE(permitted_xss != 0);
994 entry->ebx = 0;
995 }
996 entry->ecx &= permitted_xss;
997 entry->edx &= permitted_xss >> 32;
998
999 for (i = 2; i < 64; ++i) {
1000 bool s_state;
1001 if (permitted_xcr0 & BIT_ULL(i))
1002 s_state = false;
1003 else if (permitted_xss & BIT_ULL(i))
1004 s_state = true;
1005 else
1006 continue;
1007
1008 entry = do_host_cpuid(array, function, i);
1009 if (!entry)
1010 goto out;
1011
1012 /*
1013 * The supported check above should have filtered out
1014 * invalid sub-leafs. Only valid sub-leafs should
1015 * reach this point, and they should have a non-zero
1016 * save state size. Furthermore, check whether the
1017 * processor agrees with permitted_xcr0/permitted_xss
1018 * on whether this is an XCR0- or IA32_XSS-managed area.
1019 */
1020 if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
1021 --array->nent;
1022 continue;
1023 }
1024
1025 if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
1026 entry->ecx &= ~BIT_ULL(2);
1027 entry->edx = 0;
1028 }
1029 break;
1030 }
1031 case 0x12:
1032 /* Intel SGX */
1033 if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
1034 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1035 break;
1036 }
1037
1038 /*
1039 * Index 0: Sub-features, MISCSELECT (a.k.a. extended features)
1040 * and max enclave sizes. The SGX sub-features and MISCSELECT
1041 * are restricted by kernel and KVM capabilities (like most
1042 * feature flags), while enclave size is unrestricted.
1043 */
1044 cpuid_entry_override(entry, CPUID_12_EAX);
1045 entry->ebx &= SGX_MISC_EXINFO;
1046
1047 entry = do_host_cpuid(array, function, 1);
1048 if (!entry)
1049 goto out;
1050
1051 /*
1052 * Index 1: SECS.ATTRIBUTES. ATTRIBUTES are restricted a la
1053 * feature flags. Advertise all supported flags, including
1054 * privileged attributes that require explicit opt-in from
1055 * userspace. ATTRIBUTES.XFRM is not adjusted as userspace is
1056 * expected to derive it from supported XCR0.
1057 */
1058 entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;
1059 entry->ebx &= 0;
1060 break;
1061 /* Intel PT */
1062 case 0x14:
1063 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
1064 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1065 break;
1066 }
1067
1068 for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
1069 if (!do_host_cpuid(array, function, i))
1070 goto out;
1071 }
1072 break;
1073 /* Intel AMX TILE */
1074 case 0x1d:
1075 if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
1076 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1077 break;
1078 }
1079
1080 for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
1081 if (!do_host_cpuid(array, function, i))
1082 goto out;
1083 }
1084 break;
1085 case 0x1e: /* TMUL information */
1086 if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
1087 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1088 break;
1089 }
1090 break;
1091 case KVM_CPUID_SIGNATURE: {
1092 const u32 *sigptr = (const u32 *)KVM_SIGNATURE;
1093 entry->eax = KVM_CPUID_FEATURES;
1094 entry->ebx = sigptr[0];
1095 entry->ecx = sigptr[1];
1096 entry->edx = sigptr[2];
1097 break;
1098 }
1099 case KVM_CPUID_FEATURES:
1100 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
1101 (1 << KVM_FEATURE_NOP_IO_DELAY) |
1102 (1 << KVM_FEATURE_CLOCKSOURCE2) |
1103 (1 << KVM_FEATURE_ASYNC_PF) |
1104 (1 << KVM_FEATURE_PV_EOI) |
1105 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
1106 (1 << KVM_FEATURE_PV_UNHALT) |
1107 (1 << KVM_FEATURE_PV_TLB_FLUSH) |
1108 (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
1109 (1 << KVM_FEATURE_PV_SEND_IPI) |
1110 (1 << KVM_FEATURE_POLL_CONTROL) |
1111 (1 << KVM_FEATURE_PV_SCHED_YIELD) |
1112 (1 << KVM_FEATURE_ASYNC_PF_INT);
1113
1114 if (sched_info_on())
1115 entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
1116
1117 entry->ebx = 0;
1118 entry->ecx = 0;
1119 entry->edx = 0;
1120 break;
1121 case 0x80000000:
1122 entry->eax = min(entry->eax, 0x80000021);
1123 /*
1124 * Serializing LFENCE is reported in a multitude of ways, and
1125 * NullSegClearsBase is not reported in CPUID on Zen2; help
1126 * userspace by providing the CPUID leaf ourselves.
1127 *
1128 * However, only do it if the host has CPUID leaf 0x8000001d.
1129 * QEMU thinks that it can query the host blindly for that
1130 * CPUID leaf if KVM reports that it supports 0x8000001d or
1131 * above. The processor merrily returns values from the
1132 * highest Intel leaf which QEMU tries to use as the guest's
1133 * 0x8000001d. Even worse, this can result in an infinite
1134 * loop if said highest leaf has no subleaves indexed by ECX.
1135 */
1136 if (entry->eax >= 0x8000001d &&
1137 (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
1138 || !static_cpu_has_bug(X86_BUG_NULL_SEG)))
1139 entry->eax = max(entry->eax, 0x80000021);
1140 break;
1141 case 0x80000001:
1142 entry->ebx &= ~GENMASK(27, 16);
1143 cpuid_entry_override(entry, CPUID_8000_0001_EDX);
1144 cpuid_entry_override(entry, CPUID_8000_0001_ECX);
1145 break;
1146 case 0x80000006:
1147 /* Drop reserved bits, pass host L2 cache and TLB info. */
1148 entry->edx &= ~GENMASK(17, 16);
1149 break;
1150 case 0x80000007: /* Advanced power management */
1151 /* invariant TSC is CPUID.80000007H:EDX[8] */
1152 entry->edx &= (1 << 8);
1153 /* mask against host */
1154 entry->edx &= boot_cpu_data.x86_power;
1155 entry->eax = entry->ebx = entry->ecx = 0;
1156 break;
1157 case 0x80000008: {
1158 unsigned g_phys_as = (entry->eax >> 16) & 0xff;
1159 unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
1160 unsigned phys_as = entry->eax & 0xff;
1161
1162 /*
1163 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
1164 * the guest operates in the same PA space as the host, i.e.
1165 * reductions in MAXPHYADDR for memory encryption affect shadow
1166 * paging, too.
1167 *
1168 * If TDP is enabled but an explicit guest MAXPHYADDR is not
1169 * provided, use the raw bare metal MAXPHYADDR as reductions to
1170 * the HPAs do not affect GPAs.
1171 */
1172 if (!tdp_enabled)
1173 g_phys_as = boot_cpu_data.x86_phys_bits;
1174 else if (!g_phys_as)
1175 g_phys_as = phys_as;
1176
1177 entry->eax = g_phys_as | (virt_as << 8);
1178 entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
1179 entry->edx = 0;
1180 cpuid_entry_override(entry, CPUID_8000_0008_EBX);
1181 break;
1182 }
1183 case 0x8000000A:
1184 if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
1185 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1186 break;
1187 }
1188 entry->eax = 1; /* SVM revision 1 */
1189		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
1190 ASID emulation to nested SVM */
1191 entry->ecx = 0; /* Reserved */
1192 cpuid_entry_override(entry, CPUID_8000_000A_EDX);
1193 break;
1194 case 0x80000019:
1195 entry->ecx = entry->edx = 0;
1196 break;
1197 case 0x8000001a:
1198 entry->eax &= GENMASK(2, 0);
1199 entry->ebx = entry->ecx = entry->edx = 0;
1200 break;
1201 case 0x8000001e:
1202 /* Do not return host topology information. */
1203 entry->eax = entry->ebx = entry->ecx = 0;
1204 entry->edx = 0; /* reserved */
1205 break;
1206 case 0x8000001F:
1207 if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
1208 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1209 } else {
1210 cpuid_entry_override(entry, CPUID_8000_001F_EAX);
1211 /* Clear NumVMPL since KVM does not support VMPL. */
1212 entry->ebx &= ~GENMASK(31, 12);
1213 /*
1214 * Enumerate '0' for "PA bits reduction", the adjusted
1215 * MAXPHYADDR is enumerated directly (see 0x80000008).
1216 */
1217 entry->ebx &= ~GENMASK(11, 6);
1218 }
1219 break;
1220 case 0x80000020:
1221 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1222 break;
1223 case 0x80000021:
1224 entry->ebx = entry->ecx = entry->edx = 0;
1225 /*
1226 * Pass down these bits:
1227 * EAX 0 NNDBP, Processor ignores nested data breakpoints
1228 * EAX 2 LAS, LFENCE always serializing
1229 * EAX 6 NSCB, Null selector clear base
1230 *
1231 * Other defined bits are for MSRs that KVM does not expose:
1232 * EAX 3 SPCL, SMM page configuration lock
1233 * EAX 13 PCMSR, Prefetch control MSR
1234 *
1235 * KVM doesn't support SMM_CTL.
1236 * EAX 9 SMM_CTL MSR is not supported
1237 */
1238 entry->eax &= BIT(0) | BIT(2) | BIT(6);
1239 entry->eax |= BIT(9);
1240 if (static_cpu_has(X86_FEATURE_LFENCE_RDTSC))
1241 entry->eax |= BIT(2);
1242 if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
1243 entry->eax |= BIT(6);
1244 break;
1245	/* Add support for Centaur's CPUID instruction. */
1246	case 0xC0000000:
1247		/* Just support up to 0xC0000004 now. */
1248 entry->eax = min(entry->eax, 0xC0000004);
1249 break;
1250 case 0xC0000001:
1251 cpuid_entry_override(entry, CPUID_C000_0001_EDX);
1252 break;
1253 case 3: /* Processor serial number */
1254 case 5: /* MONITOR/MWAIT */
1255 case 0xC0000002:
1256 case 0xC0000003:
1257 case 0xC0000004:
1258 default:
1259 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
1260 break;
1261 }
1262
1263 r = 0;
1264
1265out:
1266 put_cpu();
1267
1268 return r;
1269}
1270
1271static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
1272 unsigned int type)
1273{
1274 if (type == KVM_GET_EMULATED_CPUID)
1275 return __do_cpuid_func_emulated(array, func);
1276
1277 return __do_cpuid_func(array, func);
1278}
1279
1280#define CENTAUR_CPUID_SIGNATURE 0xC0000000
1281
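/*
 * Enumerate an entire class of leaves: emit the <base> function first, then
 * walk the function numbers up to the limit that <base> reports in EAX.
 */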
1282static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
1283 unsigned int type)
1284{
1285 u32 limit;
1286 int r;
1287
1288 if (func == CENTAUR_CPUID_SIGNATURE &&
1289 boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
1290 return 0;
1291
1292 r = do_cpuid_func(array, func, type);
1293 if (r)
1294 return r;
1295
1296 limit = array->entries[array->nent - 1].eax;
1297 for (func = func + 1; func <= limit; ++func) {
1298 r = do_cpuid_func(array, func, type);
1299 if (r)
1300 break;
1301 }
1302
1303 return r;
1304}
1305
1306static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
1307 __u32 num_entries, unsigned int ioctl_type)
1308{
1309 int i;
1310 __u32 pad[3];
1311
1312 if (ioctl_type != KVM_GET_EMULATED_CPUID)
1313 return false;
1314
1315 /*
1316 * We want to make sure that ->padding is being passed clean from
1317 * userspace in case we want to use it for something in the future.
1318 *
1319 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
1320 * have to content ourselves with the emulated side only. /me
1321 * sheds a tear.
1322 */
1323 for (i = 0; i < num_entries; i++) {
1324 if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
1325 return true;
1326
1327 if (pad[0] || pad[1] || pad[2])
1328 return true;
1329 }
1330 return false;
1331}
1332
1333int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
1334 struct kvm_cpuid_entry2 __user *entries,
1335 unsigned int type)
1336{
1337 static const u32 funcs[] = {
1338 0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
1339 };
1340
1341 struct kvm_cpuid_array array = {
1342 .nent = 0,
1343 };
1344 int r, i;
1345
1346 if (cpuid->nent < 1)
1347 return -E2BIG;
1348 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1349 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
1350
1351 if (sanity_check_entries(entries, cpuid->nent, type))
1352 return -EINVAL;
1353
1354 array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
1355 if (!array.entries)
1356 return -ENOMEM;
1357
1358 array.maxnent = cpuid->nent;
1359
1360 for (i = 0; i < ARRAY_SIZE(funcs); i++) {
1361 r = get_cpuid_func(&array, funcs[i], type);
1362 if (r)
1363 goto out_free;
1364 }
1365 cpuid->nent = array.nent;
1366
1367 if (copy_to_user(entries, array.entries,
1368 array.nent * sizeof(struct kvm_cpuid_entry2)))
1369 r = -EFAULT;
1370
1371out_free:
1372 kvfree(array.entries);
1373 return r;
1374}
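
/*
 * Typical userspace flow (illustrative sketch, not part of this file): the
 * VMM queries the supported table from /dev/kvm and feeds it back per vCPU:
 *
 *	struct kvm_cpuid2 *cpuid;
 *
 *	cpuid = calloc(1, sizeof(*cpuid) + 64 * sizeof(struct kvm_cpuid_entry2));
 *	cpuid->nent = 64;
 *	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
 */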
1375
1376struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
1377 u32 function, u32 index)
1378{
1379 return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
1380 function, index);
1381}
1382EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);
1383
1384struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
1385 u32 function)
1386{
1387 return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
1388 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
1389}
1390EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
1391
1392/*
1393 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
1394 * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics
1395 * returns all zeroes for any undefined leaf, whether or not the leaf is in
1396 * range. Centaur/VIA follows Intel semantics.
1397 *
1398 * A leaf is considered out-of-range if its function is higher than the maximum
1399 * supported leaf of its associated class or if its associated class does not
1400 * exist.
1401 *
1402 * There are four primary classes to be considered, with their respective
1403 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive. A primary
1404 * class exists if a guest CPUID entry for its <base> leaf exists. For a given
1405 * class, CPUID.<base>.EAX contains the max supported leaf for the class.
1406 *
1407 * - Basic: 0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
1408 * - Hypervisor: 0x40000000 - 0x4fffffff
1409 * - Extended: 0x80000000 - 0xbfffffff
1410 * - Centaur: 0xc0000000 - 0xcfffffff
1411 *
1412 * The Hypervisor class is further subdivided into sub-classes that each act as
1413 * their own independent class associated with a 0x100 byte range. E.g. if QEMU
1414 * is advertising support for both HyperV and KVM, the resulting Hypervisor
1415 * CPUID sub-classes are:
1416 *
1417 * - HyperV: 0x40000000 - 0x400000ff
1418 * - KVM: 0x40000100 - 0x400001ff
1419 */
1420static struct kvm_cpuid_entry2 *
1421get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
1422{
1423 struct kvm_cpuid_entry2 *basic, *class;
1424 u32 function = *fn_ptr;
1425
1426 basic = kvm_find_cpuid_entry(vcpu, 0);
1427 if (!basic)
1428 return NULL;
1429
1430 if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
1431 is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
1432 return NULL;
1433
1434 if (function >= 0x40000000 && function <= 0x4fffffff)
1435 class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00);
1436 else if (function >= 0xc0000000)
1437 class = kvm_find_cpuid_entry(vcpu, 0xc0000000);
1438 else
1439 class = kvm_find_cpuid_entry(vcpu, function & 0x80000000);
1440
1441 if (class && function <= class->eax)
1442 return NULL;
1443
1444 /*
1445 * Leaf specific adjustments are also applied when redirecting to the
1446 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
1447 * entry for CPUID.0xb.index (see below), then the output value for EDX
1448 * needs to be pulled from CPUID.0xb.1.
1449 */
1450 *fn_ptr = basic->eax;
1451
1452 /*
1453 * The class does not exist or the requested function is out of range;
1454 * the effective CPUID entry is the max basic leaf. Note, the index of
1455 * the original requested leaf is observed!
1456 */
1457 return kvm_find_cpuid_entry_index(vcpu, basic->eax, index);
1458}
1459
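/*
 * Emulate CPUID for the guest: look up the entry for EAX/ECX, optionally
 * redirecting out-of-range functions to the max basic leaf (Intel semantics),
 * and apply dynamic fixups such as honoring TSX_CTRL's CPUID_CLEAR bit.
 */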
1460bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
1461 u32 *ecx, u32 *edx, bool exact_only)
1462{
1463 u32 orig_function = *eax, function = *eax, index = *ecx;
1464 struct kvm_cpuid_entry2 *entry;
1465 bool exact, used_max_basic = false;
1466
1467 entry = kvm_find_cpuid_entry_index(vcpu, function, index);
1468 exact = !!entry;
1469
1470 if (!entry && !exact_only) {
1471 entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
1472 used_max_basic = !!entry;
1473 }
1474
1475 if (entry) {
1476 *eax = entry->eax;
1477 *ebx = entry->ebx;
1478 *ecx = entry->ecx;
1479 *edx = entry->edx;
1480 if (function == 7 && index == 0) {
1481 u64 data;
1482 if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
1483 (data & TSX_CTRL_CPUID_CLEAR))
1484 *ebx &= ~(F(RTM) | F(HLE));
1485 }
1486 } else {
1487 *eax = *ebx = *ecx = *edx = 0;
1488 /*
1489 * When leaf 0BH or 1FH is defined, CL is pass-through
1490 * and EDX is always the x2APIC ID, even for undefined
1491 * subleaves. Index 1 will exist iff the leaf is
1492			 * implemented, so we pass through CL iff index 1
1493 * exists. EDX can be copied from any existing index.
1494 */
1495 if (function == 0xb || function == 0x1f) {
1496 entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
1497 if (entry) {
1498 *ecx = index & 0xff;
1499 *edx = entry->edx;
1500 }
1501 }
1502 }
1503 trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
1504 used_max_basic);
1505 return exact;
1506}
1507EXPORT_SYMBOL_GPL(kvm_cpuid);
1508
1509int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
1510{
1511 u32 eax, ebx, ecx, edx;
1512
1513 if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
1514 return 1;
1515
1516 eax = kvm_rax_read(vcpu);
1517 ecx = kvm_rcx_read(vcpu);
1518 kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
1519 kvm_rax_write(vcpu, eax);
1520 kvm_rbx_write(vcpu, ebx);
1521 kvm_rcx_write(vcpu, ecx);
1522 kvm_rdx_write(vcpu, edx);
1523 return kvm_skip_emulated_instruction(vcpu);
1524}
1525EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 * cpuid support routines
4 *
5 * derived from arch/x86/kvm/x86.c
6 *
7 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
8 * Copyright IBM Corporation, 2008
9 *
10 * This work is licensed under the terms of the GNU GPL, version 2. See
11 * the COPYING file in the top-level directory.
12 *
13 */
14
15#include <linux/kvm_host.h>
16#include <linux/export.h>
17#include <linux/vmalloc.h>
18#include <linux/uaccess.h>
19#include <linux/sched/stat.h>
20
21#include <asm/processor.h>
22#include <asm/user.h>
23#include <asm/fpu/xstate.h>
24#include "cpuid.h"
25#include "lapic.h"
26#include "mmu.h"
27#include "trace.h"
28#include "pmu.h"
29
30static u32 xstate_required_size(u64 xstate_bv, bool compacted)
31{
32 int feature_bit = 0;
33 u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
34
35 xstate_bv &= XFEATURE_MASK_EXTEND;
36 while (xstate_bv) {
37 if (xstate_bv & 0x1) {
38 u32 eax, ebx, ecx, edx, offset;
39 cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
40 offset = compacted ? ret : ebx;
41 ret = max(ret, offset + eax);
42 }
43
44 xstate_bv >>= 1;
45 feature_bit++;
46 }
47
48 return ret;
49}
50
51bool kvm_mpx_supported(void)
52{
53 return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
54 && kvm_x86_ops->mpx_supported());
55}
56EXPORT_SYMBOL_GPL(kvm_mpx_supported);
57
58u64 kvm_supported_xcr0(void)
59{
60 u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
61
62 if (!kvm_mpx_supported())
63 xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
64
65 return xcr0;
66}
67
68#define F(x) bit(X86_FEATURE_##x)
69
70/* For scattered features from cpufeatures.h; we currently expose none */
71#define KF(x) bit(KVM_CPUID_BIT_##x)
72
73int kvm_update_cpuid(struct kvm_vcpu *vcpu)
74{
75 struct kvm_cpuid_entry2 *best;
76 struct kvm_lapic *apic = vcpu->arch.apic;
77
78 best = kvm_find_cpuid_entry(vcpu, 1, 0);
79 if (!best)
80 return 0;
81
82 /* Update OSXSAVE bit */
83 if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
84 best->ecx &= ~F(OSXSAVE);
85 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
86 best->ecx |= F(OSXSAVE);
87 }
88
89 best->edx &= ~F(APIC);
90 if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
91 best->edx |= F(APIC);
92
93 if (apic) {
94 if (best->ecx & F(TSC_DEADLINE_TIMER))
95 apic->lapic_timer.timer_mode_mask = 3 << 17;
96 else
97 apic->lapic_timer.timer_mode_mask = 1 << 17;
98 }
99
100 best = kvm_find_cpuid_entry(vcpu, 7, 0);
101 if (best) {
102 /* Update OSPKE bit */
103 if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
104 best->ecx &= ~F(OSPKE);
105 if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
106 best->ecx |= F(OSPKE);
107 }
108 }
109
110 best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
111 if (!best) {
112 vcpu->arch.guest_supported_xcr0 = 0;
113 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
114 } else {
115 vcpu->arch.guest_supported_xcr0 =
116 (best->eax | ((u64)best->edx << 32)) &
117 kvm_supported_xcr0();
118 vcpu->arch.guest_xstate_size = best->ebx =
119 xstate_required_size(vcpu->arch.xcr0, false);
120 }
121
122 best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
123 if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
124 best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
125
126 /*
127 * The existing code assumes virtual address is 48-bit or 57-bit in the
128 * canonical address checks; exit if it is ever changed.
129 */
130 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
131 if (best) {
132 int vaddr_bits = (best->eax & 0xff00) >> 8;
133
134 if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
135 return -EINVAL;
136 }
137
138 best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
139 if (kvm_hlt_in_guest(vcpu->kvm) && best &&
140 (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
141 best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
142
143 /* Update physical-address width */
144 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
145 kvm_mmu_reset_context(vcpu);
146
147 kvm_pmu_refresh(vcpu);
148 return 0;
149}
150
151static int is_efer_nx(void)
152{
153 unsigned long long efer = 0;
154
155 rdmsrl_safe(MSR_EFER, &efer);
156 return efer & EFER_NX;
157}
158
159static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
160{
161 int i;
162 struct kvm_cpuid_entry2 *e, *entry;
163
164 entry = NULL;
165 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
166 e = &vcpu->arch.cpuid_entries[i];
167 if (e->function == 0x80000001) {
168 entry = e;
169 break;
170 }
171 }
172 if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
173 entry->edx &= ~F(NX);
174 printk(KERN_INFO "kvm: guest NX capability removed\n");
175 }
176}
177
178int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
179{
180 struct kvm_cpuid_entry2 *best;
181
182 best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
183 if (!best || best->eax < 0x80000008)
184 goto not_found;
185 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
186 if (best)
187 return best->eax & 0xff;
188not_found:
189 return 36;
190}
191EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
192
193/* when an old userspace process fills a new kernel module */
194int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
195 struct kvm_cpuid *cpuid,
196 struct kvm_cpuid_entry __user *entries)
197{
198 int r, i;
199 struct kvm_cpuid_entry *cpuid_entries = NULL;
200
201 r = -E2BIG;
202 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
203 goto out;
204 r = -ENOMEM;
205 if (cpuid->nent) {
206 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
207 cpuid->nent);
208 if (!cpuid_entries)
209 goto out;
210 r = -EFAULT;
211 if (copy_from_user(cpuid_entries, entries,
212 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
213 goto out;
214 }
215 for (i = 0; i < cpuid->nent; i++) {
216 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
217 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
218 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
219 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
220 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
221 vcpu->arch.cpuid_entries[i].index = 0;
222 vcpu->arch.cpuid_entries[i].flags = 0;
223 vcpu->arch.cpuid_entries[i].padding[0] = 0;
224 vcpu->arch.cpuid_entries[i].padding[1] = 0;
225 vcpu->arch.cpuid_entries[i].padding[2] = 0;
226 }
227 vcpu->arch.cpuid_nent = cpuid->nent;
228 cpuid_fix_nx_cap(vcpu);
229 kvm_apic_set_version(vcpu);
230 kvm_x86_ops->cpuid_update(vcpu);
231 r = kvm_update_cpuid(vcpu);
232
233out:
234 vfree(cpuid_entries);
235 return r;
236}
237
238int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
239 struct kvm_cpuid2 *cpuid,
240 struct kvm_cpuid_entry2 __user *entries)
241{
242 int r;
243
244 r = -E2BIG;
245 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
246 goto out;
247 r = -EFAULT;
248 if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
249 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
250 goto out;
251 vcpu->arch.cpuid_nent = cpuid->nent;
252 kvm_apic_set_version(vcpu);
253 kvm_x86_ops->cpuid_update(vcpu);
254 r = kvm_update_cpuid(vcpu);
255out:
256 return r;
257}
258
259int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
260 struct kvm_cpuid2 *cpuid,
261 struct kvm_cpuid_entry2 __user *entries)
262{
263 int r;
264
265 r = -E2BIG;
266 if (cpuid->nent < vcpu->arch.cpuid_nent)
267 goto out;
268 r = -EFAULT;
269 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
270 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
271 goto out;
272 return 0;
273
274out:
275 cpuid->nent = vcpu->arch.cpuid_nent;
276 return r;
277}
278
279static void cpuid_mask(u32 *word, int wordnum)
280{
281 *word &= boot_cpu_data.x86_capability[wordnum];
282}
283
284static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
285 u32 index)
286{
287 entry->function = function;
288 entry->index = index;
289 cpuid_count(entry->function, entry->index,
290 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
291 entry->flags = 0;
292}
293
294static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
295 u32 func, u32 index, int *nent, int maxnent)
296{
297 switch (func) {
298 case 0:
299 entry->eax = 7;
300 ++*nent;
301 break;
302 case 1:
303 entry->ecx = F(MOVBE);
304 ++*nent;
305 break;
306 case 7:
307 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
308 if (index == 0)
309 entry->ecx = F(RDPID);
310 ++*nent;
311 default:
312 break;
313 }
314
315 entry->function = func;
316 entry->index = index;
317
318 return 0;
319}
320
321static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
322 u32 index, int *nent, int maxnent)
323{
324 int r;
325 unsigned f_nx = is_efer_nx() ? F(NX) : 0;
326#ifdef CONFIG_X86_64
327 unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
328 ? F(GBPAGES) : 0;
329 unsigned f_lm = F(LM);
330#else
331 unsigned f_gbpages = 0;
332 unsigned f_lm = 0;
333#endif
334 unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
335 unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
336 unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
337 unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
338 unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
339
	/* cpuid 1.edx */
	const u32 kvm_cpuid_1_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_cpuid_8000_0001_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_cpuid_1_ecx_x86_features =
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but are
		 * *not* advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_cpuid_8000_0001_ecx_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /* SVM */ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | F(PERFCTR_CORE);

	/* cpuid 0x80000008.ebx */
	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
		F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_cpuid_C000_0001_edx_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 7.0.ebx */
	const u32 kvm_cpuid_7_0_ebx_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
		F(SHA_NI) | F(AVX512BW) | F(AVX512VL);

	/* cpuid 0xD.1.eax */
	const u32 kvm_cpuid_D_1_eax_x86_features =
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;

	/* cpuid 7.0.ecx */
	const u32 kvm_cpuid_7_0_ecx_x86_features =
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /* OSPKE */ |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG);

	/* cpuid 7.0.edx */
	const u32 kvm_cpuid_7_0_edx_x86_features =
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (*nent >= maxnent)
		goto out;

	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xd);
		break;
	case 1:
		entry->edx &= kvm_cpuid_1_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_1_EDX);
		entry->ecx &= kvm_cpuid_1_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_1_ECX);
		/*
		 * We support x2apic emulation even if the host does not
		 * support it, since we emulate x2apic in software.
		 */
		entry->ecx |= F(X2APIC);
		break;
	/*
	 * Function 2 entries are STATEFUL, i.e. repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying
	 * behavior in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT.
	 */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 has additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 7: {
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* Mask ebx against host capability word 9 */
		if (index == 0) {
			entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
			cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
			/* TSC_ADJUST is emulated */
			entry->ebx |= F(TSC_ADJUST);
			entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
			cpuid_mask(&entry->ecx, CPUID_7_ECX);
			entry->ecx |= f_umip;
			/* PKU is not yet implemented for shadow paging. */
			if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
				entry->ecx &= ~F(PKU);
			entry->edx &= kvm_cpuid_7_0_edx_x86_features;
			cpuid_mask(&entry->edx, CPUID_7_EDX);
			/*
			 * We emulate ARCH_CAPABILITIES in software even
			 * if the host doesn't support it.
			 */
			entry->edx |= F(ARCH_CAPABILITIES);
		} else {
			entry->ebx = 0;
			entry->ecx = 0;
			entry->edx = 0;
		}
		entry->eax = 0;
		break;
	}
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/* function 0xb has additional index. */
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xd: {
		int idx, i;
		u64 supported = kvm_supported_xcr0();

		entry->eax &= supported;
		entry->ebx = xstate_required_size(supported, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported >> 32;
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		if (!supported)
			break;

		for (idx = 1, i = 1; idx < 64; ++idx) {
			u64 mask = ((u64)1 << idx);
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[i], function, idx);
			if (idx == 1) {
				entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
				cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
				entry[i].ebx = 0;
				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
					entry[i].ebx =
						xstate_required_size(supported,
								     true);
			} else {
				if (entry[i].eax == 0 || !(supported & mask))
					continue;
				if (WARN_ON_ONCE(entry[i].ecx & 1))
					continue;
			}
			entry[i].ecx = 0;
			entry[i].edx = 0;
			entry[i].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
			++i;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001f);
		break;
	case 0x80000001:
		entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
		entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		/*
		 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
		 * hardware cpuid
		 */
		if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
			entry->ebx |= F(AMD_IBPB);
		if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
			entry->ebx |= F(AMD_IBRS);
		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
		/*
		 * VIRT_SSBD is set again after masking: even when the host
		 * only has the non-architectural LS_CFG SSBD mechanism, KVM
		 * emulates the VIRT_SPEC_CTRL MSR for the guest on top of it.
		 */
		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		break;
	}
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		break;
	case 0x8000001d:
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}

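/*
 * Dispatch on the ioctl type: emulated-only features for
 * KVM_GET_EMULATED_CPUID, the full supported set otherwise.
 */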
static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
			u32 idx, int *nent, int maxnent, unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);

	return __do_cpuid_ent(entry, func, idx, nent, maxnent);
}

#undef F

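/*
 * Describes one top-level CPUID range to enumerate. When has_leaf_count is
 * set, EAX of the base leaf reports the highest function in the range; an
 * optional qualifier() can restrict a range to certain hosts.
 */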
struct kvm_cpuid_param {
	u32 func;
	u32 idx;
	bool has_leaf_count;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};

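/* The 0xC0000000 range is Centaur (VIA) specific; only probe it there. */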
static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

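/*
 * Returns true if any entry's padding is non-zero or unreadable; only
 * enforced for KVM_GET_EMULATED_CPUID (see the comment below).
 */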
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we
	 * have to content ourselves with checking only the emulated side.
	 * /me sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

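/*
 * Handler for KVM_GET_SUPPORTED_CPUID and KVM_GET_EMULATED_CPUID: walk the
 * basic, extended, Centaur and KVM paravirt ranges and emit one entry per
 * (sub-)leaf. A typical userspace consumer (sketch only; buffer sizing and
 * error handling omitted) would do:
 *
 *	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
 *	... filter/adjust entries as desired, then ...
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
 */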
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0, .has_leaf_count = true },
		{ .func = 0x80000000, .has_leaf_count = true },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
		{ .func = KVM_CPUID_SIGNATURE },
		{ .func = KVM_CPUID_FEATURES },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	r = -ENOMEM;
	cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
				 &nent, cpuid->nent, type);

		if (r)
			goto out_free;

		if (!ent->has_leaf_count)
			continue;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
					 &nent, cpuid->nent, type);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

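/*
 * For a stateful function (only CPUID.2 is flagged as such here), rotate
 * the STATE_READ_NEXT marker to the next entry with the same function,
 * wrapping around the vCPU's table.
 */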
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	struct kvm_cpuid_entry2 *ej;
	int j = i;
	int nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	do {
		j = (j + 1) % nent;
		ej = &vcpu->arch.cpuid_entries[j];
	} while (ej->function != e->function);

	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;

	return j;
}

/*
 * Find an entry with matching function, matching index (if needed), and
 * that should be read next (if it's stateful).
 */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
				   u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

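/*
 * Look up the guest-visible entry for function/index in the vCPU's CPUID
 * table; as a side effect, a matching stateful entry is advanced to its
 * next sub-entry.
 */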
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2 *check_cpuid_limit(struct kvm_vcpu *vcpu,
						  u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *maxlevel;

	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	if (!maxlevel || maxlevel->eax >= function)
		return NULL;
	if (function & 0x80000000) {
		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (!maxlevel)
			return NULL;
	}
	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}

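/*
 * Service a guest CPUID query. On a miss with check_limit set, fall back to
 * the highest valid standard leaf per check_cpuid_limit(); otherwise return
 * zeros. The return value indicates whether an exact entry was found.
 */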
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool check_limit)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *best;
	bool entry_found = true;

	best = kvm_find_cpuid_entry(vcpu, function, index);

	if (!best) {
		entry_found = false;
		if (!check_limit)
			goto out;

		best = check_cpuid_limit(vcpu, function, index);
	}

out:
	if (best) {
		*eax = best->eax;
		*ebx = best->ebx;
		*ecx = best->ecx;
		*edx = best->edx;
	} else {
		*eax = *ebx = *ecx = *edx = 0;
	}
	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found);
	return entry_found;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

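/*
 * VM-exit handler for the CPUID instruction. If the guest has CPUID
 * faulting enabled and executes CPUID at CPL > 0, kvm_require_cpl() queues
 * a #GP and we re-enter the guest without advancing RIP.
 */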
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
	kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);