// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
14#include "linux/lockdep.h"
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include <asm/sgx.h>
#include <asm/cpuid.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"
#include "xen.h"

/*
 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
 */
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);

struct cpuid_xstate_sizes {
	u32 eax;
	u32 ebx;
	u32 ecx;
};

static struct cpuid_xstate_sizes xstate_sizes[XFEATURE_MAX] __ro_after_init;

void __init kvm_init_xstate_sizes(void)
{
	u32 ign;
	int i;

	for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes); i++) {
		struct cpuid_xstate_sizes *xs = &xstate_sizes[i];

		cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign);
	}
}

u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			struct cpuid_xstate_sizes *xs = &xstate_sizes[feature_bit];
			u32 offset;

			/* ECX[1]: 64B alignment in compacted form */
			if (compacted)
				offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret;
			else
				offset = xs->ebx;
			ret = max(ret, offset + xs->eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}
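
/*
 * Illustrative example (editor's sketch, not part of the upstream file): for
 * a guest XCR0 of x87 | SSE | AVX, only the AVX bit survives the
 * XFEATURE_MASK_EXTEND mask above, so the result is AVX's CPUID-reported
 * offset (standard format) or the 64-byte-aligned running size (compacted
 * format) plus AVX's state size:
 *
 *	u32 std = xstate_required_size(XFEATURE_MASK_FPSSE | XFEATURE_MASK_YMM, false);
 *	u32 cpt = xstate_required_size(XFEATURE_MASK_FPSSE | XFEATURE_MASK_YMM, true);
 */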

#define F feature_bit

/* Scattered Flag - For features that are scattered by cpufeatures.h. */
#define SF(name)						\
({								\
	BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES);	\
	(boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0);	\
})

/*
 * Magic value used by KVM when querying userspace-provided CPUID entries when
 * KVM doesn't care about the CPUID index because the index of the function in
 * question is not significant. Note, this magic value must have at least one
 * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find()
 * to avoid false positives when processing guest CPUID input.
 */
#define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull

static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
	struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
{
	struct kvm_cpuid_entry2 *e;
	int i;

	/*
	 * KVM has a semi-arbitrary rule that querying the guest's CPUID model
	 * with IRQs disabled is disallowed. The CPUID model can legitimately
	 * have over one hundred entries, i.e. the lookup is slow, and IRQs are
	 * typically disabled in KVM only when KVM is in a performance critical
	 * path, e.g. the core VM-Enter/VM-Exit run loop. Nothing will break
	 * if this rule is violated, this assertion is purely to flag potential
	 * performance issues. If this fires, consider moving the lookup out
	 * of the hotpath, e.g. by caching information during CPUID updates.
	 */
	lockdep_assert_irqs_enabled();

	for (i = 0; i < nent; i++) {
		e = &entries[i];

		if (e->function != function)
			continue;

		/*
		 * If the index isn't significant, use the first entry with a
		 * matching function. It's userspace's responsibility to not
		 * provide "duplicate" entries in all cases.
		 */
		if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
			return e;

		/*
		 * Similarly, use the first matching entry if KVM is doing a
		 * lookup (as opposed to emulating CPUID) for a function that's
		 * architecturally defined as not having a significant index.
		 */
		if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) {
			/*
			 * Direct lookups from KVM should not diverge from what
			 * KVM defines internally (the architectural behavior).
			 */
			WARN_ON_ONCE(cpuid_function_is_indexed(function));
			return e;
		}
	}

	return NULL;
}

static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
			   struct kvm_cpuid_entry2 *entries,
			   int nent)
{
	struct kvm_cpuid_entry2 *best;
	u64 xfeatures;

	/*
	 * The existing code assumes virtual address is 48-bit or 57-bit in the
	 * canonical address checks; exit if it is ever changed.
	 */
	best = cpuid_entry2_find(entries, nent, 0x80000008,
				 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	if (best) {
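		/* CPUID.0x80000008:EAX[15:8] enumerates the virtual address width. */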
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	/*
	 * Exposing dynamic xfeatures to the guest requires additional
	 * enabling in the FPU, e.g. to expand the guest XSAVE state size.
	 */
	best = cpuid_entry2_find(entries, nent, 0xd, 0);
	if (!best)
		return 0;

	xfeatures = best->eax | ((u64)best->edx << 32);
	xfeatures &= XFEATURE_MASK_USER_DYNAMIC;
	if (!xfeatures)
		return 0;

	return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
}

/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
				 int nent)
{
	struct kvm_cpuid_entry2 *orig;
	int i;

	if (nent != vcpu->arch.cpuid_nent)
		return -EINVAL;

	for (i = 0; i < nent; i++) {
		orig = &vcpu->arch.cpuid_entries[i];
		if (e2[i].function != orig->function ||
		    e2[i].index != orig->index ||
		    e2[i].flags != orig->flags ||
		    e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
		    e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
			return -EINVAL;
	}

	return 0;
}

static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_entry2 *entries,
							      int nent, const char *sig)
{
	struct kvm_hypervisor_cpuid cpuid = {};
	struct kvm_cpuid_entry2 *entry;
	u32 base;

	for_each_possible_hypervisor_cpuid_base(base) {
		entry = cpuid_entry2_find(entries, nent, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);

		if (entry) {
			u32 signature[3];

			signature[0] = entry->ebx;
			signature[1] = entry->ecx;
			signature[2] = entry->edx;

			if (!memcmp(signature, sig, sizeof(signature))) {
				cpuid.base = base;
				cpuid.limit = entry->eax;
				break;
			}
		}
	}

	return cpuid;
}

static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
							     const char *sig)
{
	return __kvm_get_hypervisor_cpuid(vcpu->arch.cpuid_entries,
					  vcpu->arch.cpuid_nent, sig);
}

static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_cpuid_entry2 *entries,
							      int nent, u32 kvm_cpuid_base)
{
	return cpuid_entry2_find(entries, nent, kvm_cpuid_base | KVM_CPUID_FEATURES,
				 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}

static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
	u32 base = vcpu->arch.kvm_cpuid.base;

	if (!base)
		return NULL;

	return __kvm_find_kvm_cpuid_features(vcpu->arch.cpuid_entries,
					     vcpu->arch.cpuid_nent, base);
}

void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);

	/*
	 * save the feature bitmap to avoid cpuid lookup for every PV
	 * operation
	 */
	if (best)
		vcpu->arch.pv_cpuid.features = best->eax;
}

/*
 * Calculate guest's supported XCR0 taking into account guest CPUID data and
 * KVM's supported XCR0 (comprised of host's XCR0 and KVM_SUPPORTED_XCR0).
 */
static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
{
	struct kvm_cpuid_entry2 *best;

	best = cpuid_entry2_find(entries, nent, 0xd, 0);
	if (!best)
		return 0;

	return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
}

static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
				       int nent)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_hypervisor_cpuid kvm_cpuid;

	best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	if (best) {
		/* Update OSXSAVE bit */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
					   kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));

		cpuid_entry_change(best, X86_FEATURE_APIC,
				   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
	}

	best = cpuid_entry2_find(entries, nent, 7, 0);
	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
		cpuid_entry_change(best, X86_FEATURE_OSPKE,
				   kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));

	best = cpuid_entry2_find(entries, nent, 0xD, 0);
	if (best)
		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

	best = cpuid_entry2_find(entries, nent, 0xD, 1);
	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
	if (kvm_cpuid.base) {
		best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
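		/*
		 * Editor's note: PV_UNHALT is useless when HLT exiting is
		 * disabled, as the vCPU halts natively and never blocks in
		 * KVM, so there is nothing for a PV kick to un-halt.
		 */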
		if (kvm_hlt_in_guest(vcpu->kvm) && best)
			best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
	}

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
		if (best)
			cpuid_entry_change(best, X86_FEATURE_MWAIT,
					   vcpu->arch.ia32_misc_enable_msr &
					   MSR_IA32_MISC_ENABLE_MWAIT);
	}
}

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
	__kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
}
EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);

static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
{
#ifdef CONFIG_KVM_HYPERV
	struct kvm_cpuid_entry2 *entry;

	entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
				  KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
#else
	return false;
#endif
}

static bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu, 0);
	if (!entry)
		return false;

	return is_guest_vendor_amd(entry->ebx, entry->ecx, entry->edx) ||
	       is_guest_vendor_hygon(entry->ebx, entry->ecx, entry->edx);
}

static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *best;
	bool allow_gbpages;

	BUILD_BUG_ON(KVM_NR_GOVERNED_FEATURES > KVM_MAX_NR_GOVERNED_FEATURES);
	bitmap_zero(vcpu->arch.governed_features.enabled,
		    KVM_MAX_NR_GOVERNED_FEATURES);

	/*
	 * If TDP is enabled, let the guest use GBPAGES if they're supported in
	 * hardware. The hardware page walker doesn't let KVM disable GBPAGES,
	 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
	 * walk for performance and complexity reasons. Not to mention KVM
	 * _can't_ solve the problem because GVA->GPA walks aren't visible to
	 * KVM once a TDP translation is installed. Mimic hardware behavior so
	 * that KVM's behavior is at least consistent, i.e. doesn't randomly
	 * inject #PF.
	 * If TDP is disabled, honor *only* guest CPUID as KVM has full control
	 * and can install smaller shadow pages if the host lacks 1GiB support.
	 */
	allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
				      guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
	if (allow_gbpages)
		kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES);

	best = kvm_find_cpuid_entry(vcpu, 1);
	if (best && apic) {
		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;

		kvm_apic_set_version(vcpu);
	}

	vcpu->arch.guest_supported_xcr0 =
		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);

	kvm_update_pv_runtime(vcpu);

	vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	kvm_pmu_refresh(vcpu);
	vcpu->arch.cr4_guest_rsvd_bits =
		__cr4_reserved_bits(guest_cpuid_has, vcpu);

	kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
						    vcpu->arch.cpuid_nent));

	/* Invoke the vendor callback only after the above state is updated. */
	kvm_x86_call(vcpu_after_set_cpuid)(vcpu);

	/*
	 * Except for the MMU, which needs to do its thing after any vendor
	 * specific adjustments to the reserved GPA bits.
	 */
	kvm_mmu_after_set_cpuid(vcpu);
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008);
	if (best)
		return best->eax & 0xff;
not_found:
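	/*
	 * Editor's note: 36 bits is the architectural floor for MAXPHYADDR on
	 * CPUs that support PAE, hence the conservative fallback when the
	 * guest CPUID doesn't enumerate leaf 0x80000008.
	 */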
	return 36;
}

/*
 * This "raw" version returns the reserved GPA bits without any adjustments for
 * encryption technologies that usurp bits. The raw mask should be used if and
 * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
 */
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
{
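	/* E.g. MAXPHYADDR=48 yields a mask covering GPA bits 63:48. */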
	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
}

static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
			 int nent)
{
	int r;

	__kvm_update_cpuid_runtime(vcpu, e2, nent);

	/*
	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
	 * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
	 * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
	 * the core vCPU model on the fly. It would've been better to forbid any
	 * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately
	 * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do
	 * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
	 * whether the supplied CPUID data is equal to what's already set.
	 */
	if (kvm_vcpu_has_run(vcpu)) {
		r = kvm_cpuid_check_equal(vcpu, e2, nent);
		if (r)
			return r;

		kvfree(e2);
		return 0;
	}

#ifdef CONFIG_KVM_HYPERV
	if (kvm_cpuid_has_hyperv(e2, nent)) {
		r = kvm_hv_vcpu_init(vcpu);
		if (r)
			return r;
	}
#endif

	r = kvm_check_cpuid(vcpu, e2, nent);
	if (r)
		return r;

	kvfree(vcpu->arch.cpuid_entries);
	vcpu->arch.cpuid_entries = e2;
	vcpu->arch.cpuid_nent = nent;

	vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
#ifdef CONFIG_KVM_XEN
	vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
#endif
	kvm_vcpu_after_set_cpuid(vcpu);

	return 0;
}

/*
 * Legacy KVM_SET_CPUID path: an old userspace supplies index-less
 * "struct kvm_cpuid_entry" entries that are converted to kvm_cpuid_entry2.
 */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *e = NULL;
	struct kvm_cpuid_entry2 *e2 = NULL;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e = vmemdup_array_user(entries, cpuid->nent, sizeof(*e));
		if (IS_ERR(e))
			return PTR_ERR(e);

		e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
		if (!e2) {
			r = -ENOMEM;
			goto out_free_cpuid;
		}
	}
	for (i = 0; i < cpuid->nent; i++) {
		e2[i].function = e[i].function;
		e2[i].eax = e[i].eax;
		e2[i].ebx = e[i].ebx;
		e2[i].ecx = e[i].ecx;
		e2[i].edx = e[i].edx;
		e2[i].index = 0;
		e2[i].flags = 0;
		e2[i].padding[0] = 0;
		e2[i].padding[1] = 0;
		e2[i].padding[2] = 0;
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

out_free_cpuid:
	kvfree(e);

	return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *e2 = NULL;
	int r;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e2 = vmemdup_array_user(entries, cpuid->nent, sizeof(*e2));
		if (IS_ERR(e2))
			return PTR_ERR(e2);
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

	return r;
}
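
/*
 * Illustrative userspace flow (editor's sketch, not part of this file): a VMM
 * typically seeds the vCPU model from KVM's supported CPUID, e.g.:
 *
 *	struct kvm_cpuid2 *cpuid = ...;                 // nent sized by caller
 *	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);  // system ioctl
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);          // per-vCPU ioctl
 */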

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		return -E2BIG;

	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;

	cpuid->nent = vcpu->arch.cpuid_nent;
	return 0;
}

/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
	struct kvm_cpuid_entry2 entry;

	reverse_cpuid_check(leaf);

	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}

static __always_inline
void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */
	BUILD_BUG_ON(leaf < NCAPINTS);

	kvm_cpu_caps[leaf] = mask;

	__kvm_cpu_cap_mask(leaf);
}

static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */
	BUILD_BUG_ON(leaf >= NCAPINTS);

	kvm_cpu_caps[leaf] &= mask;

	__kvm_cpu_cap_mask(leaf);
}
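
/*
 * Editor's note: the mask pattern above means a feature bit reaches guests
 * only if KVM whitelists it in the mask AND the host CPU actually reports it,
 * e.g. kvm_cpu_cap_mask(CPUID_1_ECX, F(XSAVE) | ...) drops XSAVE on hosts
 * without it, even though KVM would otherwise allow it.
 */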

void kvm_set_cpu_caps(void)
{
#ifdef CONFIG_X86_64
	unsigned int f_gbpages = F(GBPAGES);
	unsigned int f_lm = F(LM);
	unsigned int f_xfd = F(XFD);
#else
	unsigned int f_gbpages = 0;
	unsigned int f_lm = 0;
	unsigned int f_xfd = 0;
#endif
	memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));

	BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
		     sizeof(boot_cpu_data.x86_capability));

	memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
	       sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));

	kvm_cpu_cap_mask(CPUID_1_ECX,
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND)
	);
	/* KVM emulates x2apic in software irrespective of host support. */
	kvm_cpu_cap_set(X86_FEATURE_X2APIC);

	kvm_cpu_cap_mask(CPUID_1_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */
	);

	kvm_cpu_cap_mask(CPUID_7_0_EBX,
		F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
		F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
		F(RTM) | F(ZERO_FCS_FDS) | 0 /* MPX */ | F(AVX512F) |
		F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
		F(CLFLUSHOPT) | F(CLWB) | 0 /* INTEL_PT */ | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
		F(AVX512VL));

	kvm_cpu_cap_mask(CPUID_7_ECX,
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /* OSPKE */ | F(RDPID) |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /* WAITPKG */ |
		F(SGX_LC) | F(BUS_LOCK_DETECT)
	);
	/* Set LA57 based on hardware capability. */
	if (cpuid_ecx(7) & F(LA57))
		kvm_cpu_cap_set(X86_FEATURE_LA57);

	/*
	 * PKU not yet implemented for shadow paging and requires OSPKE
	 * to be set on the host. Clear it if that is not the case.
	 */
	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
		kvm_cpu_cap_clear(X86_FEATURE_PKU);

	kvm_cpu_cap_mask(CPUID_7_EDX,
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
		F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
		F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D)
	);

	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
	kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
	kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);

	if (boot_cpu_has(X86_FEATURE_AMD_IBPB_RET) &&
	    boot_cpu_has(X86_FEATURE_AMD_IBPB) &&
	    boot_cpu_has(X86_FEATURE_AMD_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);

	kvm_cpu_cap_mask(CPUID_7_1_EAX,
		F(SHA512) | F(SM3) | F(SM4) | F(AVX_VNNI) | F(AVX512_BF16) |
		F(CMPCCXADD) | F(FZRM) | F(FSRS) | F(FSRC) | F(AMX_FP16) |
		F(AVX_IFMA) | F(LAM)
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
		F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(AMX_COMPLEX) |
		F(AVX_VNNI_INT16) | F(PREFETCHITI) | F(AVX10)
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
		F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
		F(BHI_CTRL) | F(MCDT_NO)
	);

	kvm_cpu_cap_mask(CPUID_D_1_EAX,
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
		SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_24_0_EBX,
		F(AVX10_128) | F(AVX10_256) | F(AVX10_512)
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /* SVM */ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | 0 /* PERFCTR_CORE */
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
	);

	if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
		kvm_cpu_cap_set(X86_FEATURE_GBPAGES);

	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX,
		SF(CONSTANT_TSC)
	);

	kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
		F(CLZERO) | F(XSAVEERPTR) |
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
		F(AMD_PSFD) | F(AMD_IBPB_RET)
	);

	/*
	 * AMD has separate bits for each SPEC_CTRL bit.
	 * arch/x86/kernel/cpu/bugs.c is kind enough to
	 * record that in cpufeatures so use them.
	 */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
		if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
		    !boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB))
			kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB_RET);
	}
	if (boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
	/*
	 * The preference is to use SPEC CTRL MSR instead of the
	 * VIRT_SPEC MSR.
	 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

	/*
	 * Hide all SVM features by default, SVM will set the cap bits for
	 * features it emulates and/or exposes for L1.
	 */
	kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);

	kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
		0 /* SME */ | 0 /* SEV */ | 0 /* VM_PAGE_FLUSH */ | 0 /* SEV_ES */ |
		F(SME_COHERENT));

	kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
		F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
		F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ |
		F(WRMSR_XX_BASE_NS)
	);

	kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);
	kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE);
	kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO);

	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
		F(PERFMON_V2)
	);

	/*
	 * Synthesize "LFENCE is serializing" into the AMD-defined entry in
	 * KVM's supported CPUID if the feature is reported as supported by the
	 * kernel. LFENCE_RDTSC was a Linux-defined synthetic feature long
	 * before AMD joined the bandwagon, e.g. LFENCE is serializing on most
	 * CPUs that support SSE2. On CPUs that don't support AMD's leaf,
	 * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing
	 * the mask with the raw host CPUID, and reporting support in AMD's
	 * leaf can make it easier for userspace to detect the feature.
	 */
	if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
		kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC);
	if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
		kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE);
	kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR);

	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN)
	);

	/*
	 * Hide RDTSCP and RDPID if either feature is reported as supported but
	 * probing MSR_TSC_AUX failed. This is purely a sanity check and
	 * should never happen, but the guest will likely crash if RDTSCP or
	 * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
	 * the past. For example, the sanity check may fire if this instance of
	 * KVM is running as L1 on top of an older, broken KVM.
	 */
	if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
		     kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
		     !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);

struct kvm_cpuid_array {
	struct kvm_cpuid_entry2 *entries;
	int maxnent;
	int nent;
};

static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
{
	if (array->nent >= array->maxnent)
		return NULL;

	return &array->entries[array->nent++];
}

static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
					      u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);

	if (!entry)
		return NULL;

	memset(entry, 0, sizeof(*entry));
	entry->function = function;
	entry->index = index;
	switch (function & 0xC0000000) {
	case 0x40000000:
		/* Hypervisor leaves are always synthesized by __do_cpuid_func. */
		return entry;

	case 0x80000000:
		/*
		 * 0x80000021 is sometimes synthesized by __do_cpuid_func, which
		 * would result in out-of-bounds calls to do_host_cpuid.
		 */
		{
			static int max_cpuid_80000000;
			if (!READ_ONCE(max_cpuid_80000000))
				WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000));
			if (function > READ_ONCE(max_cpuid_80000000))
				return entry;
		}
		break;

	default:
		break;
	}

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	if (cpuid_function_is_indexed(function))
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;

	return entry;
}

static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return -E2BIG;

	entry = &array->entries[array->nent];
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++array->nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++array->nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
			entry->ecx = F(RDPID);
		++array->nent;
		break;
	default:
		break;
	}

	return 0;
}

static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
	struct kvm_cpuid_entry2 *entry;
	int r, i, max_idx;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	entry = do_host_cpuid(array, function, 0);
	if (!entry)
		goto out;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x24U);
		break;
	case 1:
		cpuid_entry_override(entry, CPUID_1_EDX);
		cpuid_entry_override(entry, CPUID_1_ECX);
		break;
	case 2:
		/*
		 * On ancient CPUs, function 2 entries are STATEFUL. That is,
		 * CPUID(function=2, index=0) may return different results each
		 * time, with the least-significant byte in EAX enumerating the
		 * number of times software should do CPUID(2, 0).
		 *
		 * Modern CPUs, i.e. every CPU KVM has *ever* run on are less
		 * idiotic. Intel's SDM states that EAX & 0xff "will always
		 * return 01H. Software should ignore this value and not
		 * interpret it as an informational descriptor", while AMD's
		 * APM states that CPUID(2) is reserved.
		 *
		 * WARN if a frankenstein CPU that supports virtualization and
		 * a stateful CPUID.0x2 is encountered.
		 */
		WARN_ON_ONCE((entry->eax & 0xff) > 1);
		break;
	/* functions 4 and 0x8000001d have additional index. */
	case 4:
	case 0x8000001d:
		/*
		 * Read entries until the cache type in the previous entry is
		 * zero, i.e. indicates an invalid entry.
		 */
		for (i = 1; entry->eax & 0x1f; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* function 7 has additional index. */
	case 7:
		max_idx = entry->eax = min(entry->eax, 2u);
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);

		/* KVM only supports up to 0x7.2, capped above via min(). */
		if (max_idx >= 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_1_EAX);
			cpuid_entry_override(entry, CPUID_7_1_EDX);
			entry->ebx = 0;
			entry->ecx = 0;
		}
		if (max_idx >= 2) {
			entry = do_host_cpuid(array, function, 2);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_2_EDX);
			entry->ecx = 0;
			entry->ebx = 0;
			entry->eax = 0;
		}
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		eax.split.version_id = kvm_pmu_cap.version;
		eax.split.num_counters = kvm_pmu_cap.num_counters_gp;
		eax.split.bit_width = kvm_pmu_cap.bit_width_gp;
		eax.split.mask_length = kvm_pmu_cap.events_mask_len;
		edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed;
		edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed;

		if (kvm_pmu_cap.version)
			edx.split.anythread_deprecated = 1;
		edx.split.reserved1 = 0;
		edx.split.reserved2 = 0;

		entry->eax = eax.full;
		entry->ebx = kvm_pmu_cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	case 0x1f:
	case 0xb:
		/*
		 * No topology; a valid topology is indicated by the presence
		 * of subleaf 1.
		 */
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0xd: {
		u64 permitted_xcr0 = kvm_get_filtered_xcr0();
		u64 permitted_xss = kvm_caps.supported_xss;

		entry->eax &= permitted_xcr0;
		entry->ebx = xstate_required_size(permitted_xcr0, false);
		entry->ecx = entry->ebx;
		entry->edx &= permitted_xcr0 >> 32;
		if (!permitted_xcr0)
			break;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		cpuid_entry_override(entry, CPUID_D_1_EAX);
		if (entry->eax & (F(XSAVES) | F(XSAVEC)))
			entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
							  true);
		else {
			WARN_ON_ONCE(permitted_xss != 0);
			entry->ebx = 0;
		}
		entry->ecx &= permitted_xss;
		entry->edx &= permitted_xss >> 32;

		for (i = 2; i < 64; ++i) {
			bool s_state;
			if (permitted_xcr0 & BIT_ULL(i))
				s_state = false;
			else if (permitted_xss & BIT_ULL(i))
				s_state = true;
			else
				continue;

			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;

			/*
			 * The supported check above should have filtered out
			 * invalid sub-leafs. Only valid sub-leafs should
			 * reach this point, and they should have a non-zero
			 * save state size. Furthermore, check whether the
			 * processor agrees with permitted_xcr0/permitted_xss
			 * on whether this is an XCR0- or IA32_XSS-managed area.
			 */
			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
				--array->nent;
				continue;
			}

			if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
				entry->ecx &= ~BIT_ULL(2);
			entry->edx = 0;
		}
		break;
	}
	case 0x12:
		/* Intel SGX */
		if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		/*
		 * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
		 * and max enclave sizes. The SGX sub-features and MISCSELECT
		 * are restricted by kernel and KVM capabilities (like most
		 * feature flags), while enclave size is unrestricted.
		 */
		cpuid_entry_override(entry, CPUID_12_EAX);
		entry->ebx &= SGX_MISC_EXINFO;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		/*
		 * Index 1: SECS.ATTRIBUTES. ATTRIBUTES are restricted a la
		 * feature flags. Advertise all supported flags, including
		 * privileged attributes that require explicit opt-in from
		 * userspace. ATTRIBUTES.XFRM is not adjusted as userspace is
		 * expected to derive it from supported XCR0.
		 */
		entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;
		entry->ebx &= 0;
		break;
	/* Intel PT */
	case 0x14:
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	/* Intel AMX TILE */
	case 0x1d:
		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	case 0x1e: /* TMUL information */
		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		break;
	case 0x24: {
		u8 avx10_version;

		if (!kvm_cpu_cap_has(X86_FEATURE_AVX10)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		/*
		 * The AVX10 version is encoded in EBX[7:0]. Note, the version
		 * is guaranteed to be >=1 if AVX10 is supported. Note #2, the
		 * version needs to be captured before overriding EBX features!
		 */
		avx10_version = min_t(u8, entry->ebx & 0xff, 1);
		cpuid_entry_override(entry, CPUID_24_0_EBX);
		entry->ebx |= avx10_version;

		entry->eax = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		const u32 *sigptr = (const u32 *)KVM_SIGNATURE;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD) |
			     (1 << KVM_FEATURE_ASYNC_PF_INT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x80000022);
		/*
		 * Serializing LFENCE is reported in a multitude of ways, and
		 * NullSegClearsBase is not reported in CPUID on Zen2; help
		 * userspace by providing the CPUID leaf ourselves.
		 *
		 * However, only do it if the host has CPUID leaf 0x8000001d.
		 * QEMU thinks that it can query the host blindly for that
		 * CPUID leaf if KVM reports that it supports 0x8000001d or
		 * above. The processor merrily returns values from the
		 * highest Intel leaf which QEMU tries to use as the guest's
		 * 0x8000001d. Even worse, this can result in an infinite
		 * loop if said highest leaf has no subleaves indexed by ECX.
		 */
		if (entry->eax >= 0x8000001d &&
		    (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
		     || !static_cpu_has_bug(X86_BUG_NULL_SEG)))
			entry->eax = max(entry->eax, 0x80000021);
		break;
	case 0x80000001:
		entry->ebx &= ~GENMASK(27, 16);
		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
		break;
	case 0x80000005:
		/* Pass host L1 cache and TLB info. */
		break;
	case 0x80000006:
		/* Drop reserved bits, pass host L2 cache and TLB info. */
		entry->edx &= ~GENMASK(17, 16);
		break;
	case 0x80000007: /* Advanced power management */
		cpuid_entry_override(entry, CPUID_8000_0007_EDX);

		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		/*
		 * GuestPhysAddrSize (EAX[23:16]) is intended for software
		 * use.
		 *
		 * KVM's ABI is to report the effective MAXPHYADDR for the
		 * guest in PhysAddrSize (phys_as), and the maximum
		 * *addressable* GPA in GuestPhysAddrSize (g_phys_as).
		 *
		 * GuestPhysAddrSize is valid if and only if TDP is enabled,
		 * in which case the max GPA that can be addressed by KVM may
		 * be less than the max GPA that can be legally generated by
		 * the guest, e.g. if MAXPHYADDR>48 but the CPU doesn't
		 * support 5-level TDP.
		 */
		unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned int phys_as, g_phys_as;

		/*
		 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
		 * the guest operates in the same PA space as the host, i.e.
		 * reductions in MAXPHYADDR for memory encryption affect shadow
		 * paging, too.
		 *
		 * If TDP is enabled, use the raw bare metal MAXPHYADDR as
		 * reductions to the HPAs do not affect GPAs. The max
		 * addressable GPA is the same as the max effective GPA, except
		 * that it's capped at 48 bits if 5-level TDP isn't supported
		 * (hardware processes bits 51:48 only when walking the fifth
		 * level page table).
		 */
		if (!tdp_enabled) {
			phys_as = boot_cpu_data.x86_phys_bits;
			g_phys_as = 0;
		} else {
			phys_as = entry->eax & 0xff;
			g_phys_as = phys_as;
			if (kvm_mmu_get_max_tdp_level() < 5)
				g_phys_as = min(g_phys_as, 48);
		}

		entry->eax = phys_as | (virt_as << 8) | (g_phys_as << 16);
		entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
		entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
		break;
	}
	case 0x8000000A:
		if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
		break;
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		entry->eax &= GENMASK(2, 0);
		entry->ebx = entry->ecx = entry->edx = 0;
		break;
	case 0x8000001e:
		/* Do not return host topology information. */
		entry->eax = entry->ebx = entry->ecx = 0;
		entry->edx = 0; /* reserved */
		break;
	case 0x8000001F:
		if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		} else {
			cpuid_entry_override(entry, CPUID_8000_001F_EAX);
			/* Clear NumVMPL since KVM does not support VMPL. */
			entry->ebx &= ~GENMASK(31, 12);
			/*
			 * Enumerate '0' for "PA bits reduction", the adjusted
			 * MAXPHYADDR is enumerated directly (see 0x80000008).
			 */
			entry->ebx &= ~GENMASK(11, 6);
		}
		break;
	case 0x80000020:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	case 0x80000021:
		entry->ebx = entry->ecx = entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0021_EAX);
		break;
	/* AMD Extended Performance Monitoring and Debug */
	case 0x80000022: {
		union cpuid_0x80000022_ebx ebx;

		entry->ecx = entry->edx = 0;
		if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
			entry->eax = entry->ebx = 0;
			break;
		}

		cpuid_entry_override(entry, CPUID_8000_0022_EAX);

		if (kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
			ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp;
		else if (kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
			ebx.split.num_core_pmc = AMD64_NUM_COUNTERS_CORE;
		else
			ebx.split.num_core_pmc = AMD64_NUM_COUNTERS;

		entry->ebx = ebx.full;
		break;
	}
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	r = 0;

out:
	put_cpu();

	return r;
}

static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			 unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(array, func);

	return __do_cpuid_func(array, func);
}

#define CENTAUR_CPUID_SIGNATURE 0xC0000000

static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			  unsigned int type)
{
	u32 limit;
	int r;

	if (func == CENTAUR_CPUID_SIGNATURE &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
		return 0;

	r = do_cpuid_func(array, func, type);
	if (r)
		return r;

	limit = array->entries[array->nent - 1].eax;
	for (func = func + 1; func <= limit; ++func) {
		r = do_cpuid_func(array, func, type);
		if (r)
			break;
	}

	return r;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to give ourselves satisfied only with the emulated side. /me
	 * sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	static const u32 funcs[] = {
		0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
	};

	struct kvm_cpuid_array array = {
		.nent = 0,
	};
	int r, i;

	if (cpuid->nent < 1)
		return -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
	if (!array.entries)
		return -ENOMEM;

	array.maxnent = cpuid->nent;

	for (i = 0; i < ARRAY_SIZE(funcs); i++) {
		r = get_cpuid_func(&array, funcs[i], type);
		if (r)
			goto out_free;
	}
	cpuid->nent = array.nent;

	if (copy_to_user(entries, array.entries,
			 array.nent * sizeof(struct kvm_cpuid_entry2)))
		r = -EFAULT;

out_free:
	kvfree(array.entries);
	return r;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index)
{
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, index);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function)
{
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * Intel CPUID semantics treat any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics
 * return all zeroes for any undefined leaf, whether or not the leaf is in
 * range. Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive. A primary
 * class exists if a guest CPUID entry for its <base> leaf exists. For a given
 * class, CPUID.<base>.EAX contains the max supported leaf for the class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a 0x100 byte range. E.g. if Qemu
 * is advertising support for both HyperV and KVM, the resulting Hypervisor
 * CPUID sub-classes are:
 *
 *  - HyperV:     0x40000000 - 0x400000ff
 *  - KVM:        0x40000100 - 0x400001ff
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
	struct kvm_cpuid_entry2 *basic, *class;
	u32 function = *fn_ptr;

	basic = kvm_find_cpuid_entry(vcpu, 0);
	if (!basic)
		return NULL;

	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
		return NULL;

	if (function >= 0x40000000 && function <= 0x4fffffff)
		class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00);
	else if (function >= 0xc0000000)
		class = kvm_find_cpuid_entry(vcpu, 0xc0000000);
	else
		class = kvm_find_cpuid_entry(vcpu, function & 0x80000000);

	if (class && function <= class->eax)
		return NULL;

	/*
	 * Leaf specific adjustments are also applied when redirecting to the
	 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
	 * entry for CPUID.0xb.index (see below), then the output value for EDX
	 * needs to be pulled from CPUID.0xb.1.
	 */
	*fn_ptr = basic->eax;

	/*
	 * The class does not exist or the requested function is out of range;
	 * the effective CPUID entry is the max basic leaf. Note, the index of
	 * the original requested leaf is observed!
	 */
	return kvm_find_cpuid_entry_index(vcpu, basic->eax, index);
}
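
/*
 * Example (illustrative): for an Intel guest whose max basic leaf is 0xd,
 * emulating CPUID(0x3f, 1) redirects to the guest's CPUID(0xd, 1) entry,
 * mirroring the Intel out-of-range semantics described above.
 */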

bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only)
{
	u32 orig_function = *eax, function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *entry;
	bool exact, used_max_basic = false;

	entry = kvm_find_cpuid_entry_index(vcpu, function, index);
	exact = !!entry;

	if (!entry && !exact_only) {
		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
		used_max_basic = !!entry;
	}

	if (entry) {
		*eax = entry->eax;
		*ebx = entry->ebx;
		*ecx = entry->ecx;
		*edx = entry->edx;
		if (function == 7 && index == 0) {
			u64 data;
			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
			    (data & TSX_CTRL_CPUID_CLEAR))
				*ebx &= ~(F(RTM) | F(HLE));
		} else if (function == 0x80000007) {
			if (kvm_hv_invtsc_suppressed(vcpu))
				*edx &= ~SF(CONSTANT_TSC);
		}
	} else {
		*eax = *ebx = *ecx = *edx = 0;
		/*
		 * When leaf 0BH or 1FH is defined, CL is pass-through
		 * and EDX is always the x2APIC ID, even for undefined
		 * subleaves. Index 1 will exist iff the leaf is
		 * implemented, so we pass through CL iff leaf 1
		 * exists. EDX can be copied from any existing index.
		 */
		if (function == 0xb || function == 0x1f) {
			entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
			if (entry) {
				*ecx = index & 0xff;
				*edx = entry->edx;
			}
		}
	}
	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
			used_max_basic);
	return exact;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 * cpuid support routines
5 *
6 * derived from arch/x86/kvm/x86.c
7 *
8 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
9 * Copyright IBM Corporation, 2008
10 */
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/kvm_host.h>
14#include "linux/lockdep.h"
15#include <linux/export.h>
16#include <linux/vmalloc.h>
17#include <linux/uaccess.h>
18#include <linux/sched/stat.h>
19
20#include <asm/processor.h>
21#include <asm/user.h>
22#include <asm/fpu/xstate.h>
23#include <asm/sgx.h>
24#include <asm/cpuid.h>
25#include "cpuid.h"
26#include "lapic.h"
27#include "mmu.h"
28#include "trace.h"
29#include "pmu.h"
30#include "xen.h"
31
32/*
33 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
34 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
35 */
36u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
37EXPORT_SYMBOL_GPL(kvm_cpu_caps);
38
39u32 xstate_required_size(u64 xstate_bv, bool compacted)
40{
41 int feature_bit = 0;
42 u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
43
44 xstate_bv &= XFEATURE_MASK_EXTEND;
45 while (xstate_bv) {
46 if (xstate_bv & 0x1) {
47 u32 eax, ebx, ecx, edx, offset;
48 cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
49 /* ECX[1]: 64B alignment in compacted form */
50 if (compacted)
51 offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
52 else
53 offset = ebx;
54 ret = max(ret, offset + eax);
55 }
56
57 xstate_bv >>= 1;
58 feature_bit++;
59 }
60
61 return ret;
62}
63
64#define F feature_bit
65
66/* Scattered Flag - For features that are scattered by cpufeatures.h. */
67#define SF(name) \
68({ \
69 BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \
70 (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0); \
71})
72
73/*
74 * Magic value used by KVM when querying userspace-provided CPUID entries and
75 * doesn't care about the CPIUD index because the index of the function in
76 * question is not significant. Note, this magic value must have at least one
77 * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find()
78 * to avoid false positives when processing guest CPUID input.
79 */
80#define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
81
static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
        struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
{
        struct kvm_cpuid_entry2 *e;
        int i;

        /*
         * KVM has a semi-arbitrary rule that querying the guest's CPUID model
         * with IRQs disabled is disallowed.  The CPUID model can legitimately
         * have over one hundred entries, i.e. the lookup is slow, and IRQs are
         * typically disabled in KVM only when KVM is in a performance critical
         * path, e.g. the core VM-Enter/VM-Exit run loop.  Nothing will break
         * if this rule is violated, this assertion is purely to flag potential
         * performance issues.  If this fires, consider moving the lookup out
         * of the hotpath, e.g. by caching information during CPUID updates.
         */
        lockdep_assert_irqs_enabled();

        for (i = 0; i < nent; i++) {
                e = &entries[i];

                if (e->function != function)
                        continue;

                /*
                 * If the index isn't significant, use the first entry with a
                 * matching function.  It's userspace's responsibility to not
                 * provide "duplicate" entries in all cases.
                 */
                if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
                        return e;

                /*
                 * Similarly, use the first matching entry if KVM is doing a
                 * lookup (as opposed to emulating CPUID) for a function that's
                 * architecturally defined as not having a significant index.
                 */
                if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) {
                        /*
                         * Direct lookups from KVM should not diverge from what
                         * KVM defines internally (the architectural behavior).
                         */
                        WARN_ON_ONCE(cpuid_function_is_indexed(function));
                        return e;
                }
        }

        return NULL;
}

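/*
 * Illustrative sketch (not compiled): the two lookup flavors accepted above.
 * "entries" and "nent" stand for some guest CPUID array; the leaves queried
 * are arbitrary examples.
 */
#if 0
        /* CPUID.0x7 is indexed, so the sub-leaf of interest must be given. */
        e = cpuid_entry2_find(entries, nent, 7, 0);

        /* CPUID.0x1 has no significant index, any matching entry will do. */
        e = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
#endif
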
static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
                           struct kvm_cpuid_entry2 *entries,
                           int nent)
{
        struct kvm_cpuid_entry2 *best;
        u64 xfeatures;

        /*
         * KVM's canonical address checks assume a 48-bit or 57-bit virtual
         * address width; reject the CPUID data if it advertises anything else.
         */
        best = cpuid_entry2_find(entries, nent, 0x80000008,
                                 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
        if (best) {
                int vaddr_bits = (best->eax & 0xff00) >> 8;

                if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
                        return -EINVAL;
        }

        /*
         * Exposing dynamic xfeatures to the guest requires additional
         * enabling in the FPU, e.g. to expand the guest XSAVE state size.
         */
        best = cpuid_entry2_find(entries, nent, 0xd, 0);
        if (!best)
                return 0;

        xfeatures = best->eax | ((u64)best->edx << 32);
        xfeatures &= XFEATURE_MASK_USER_DYNAMIC;
        if (!xfeatures)
                return 0;

        return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
}

/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
                                 int nent)
{
        struct kvm_cpuid_entry2 *orig;
        int i;

        if (nent != vcpu->arch.cpuid_nent)
                return -EINVAL;

        for (i = 0; i < nent; i++) {
                orig = &vcpu->arch.cpuid_entries[i];
                if (e2[i].function != orig->function ||
                    e2[i].index != orig->index ||
                    e2[i].flags != orig->flags ||
                    e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
                    e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
                        return -EINVAL;
        }

        return 0;
}

static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_entry2 *entries,
                                                              int nent, const char *sig)
{
        struct kvm_hypervisor_cpuid cpuid = {};
        struct kvm_cpuid_entry2 *entry;
        u32 base;

        for_each_possible_hypervisor_cpuid_base(base) {
                entry = cpuid_entry2_find(entries, nent, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);

                if (entry) {
                        u32 signature[3];

                        signature[0] = entry->ebx;
                        signature[1] = entry->ecx;
                        signature[2] = entry->edx;

                        if (!memcmp(signature, sig, sizeof(signature))) {
                                cpuid.base = base;
                                cpuid.limit = entry->eax;
                                break;
                        }
                }
        }

        return cpuid;
}

static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
                                                            const char *sig)
{
        return __kvm_get_hypervisor_cpuid(vcpu->arch.cpuid_entries,
                                          vcpu->arch.cpuid_nent, sig);
}

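/*
 * Illustrative sketch (not compiled): how a 12-character hypervisor signature
 * maps onto EBX/ECX/EDX.  For KVM_SIGNATURE ("KVMKVMKVM\0\0\0"), the memcmp()
 * above amounts to:
 */
#if 0
        u32 signature[3] = { entry->ebx, entry->ecx, entry->edx };

        /* "KVMK" in EBX, "VMKV" in ECX, "M\0\0\0" in EDX, little-endian. */
        found = !memcmp(signature, "KVMKVMKVM\0\0\0", 12);
#endif
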
static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_cpuid_entry2 *entries,
                                                              int nent, u32 kvm_cpuid_base)
{
        return cpuid_entry2_find(entries, nent, kvm_cpuid_base | KVM_CPUID_FEATURES,
                                 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}

static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
        u32 base = vcpu->arch.kvm_cpuid.base;

        if (!base)
                return NULL;

        return __kvm_find_kvm_cpuid_features(vcpu->arch.cpuid_entries,
                                             vcpu->arch.cpuid_nent, base);
}

void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);

        /*
         * Save the feature bitmap to avoid a CPUID lookup on every PV
         * operation.
         */
        if (best)
                vcpu->arch.pv_cpuid.features = best->eax;
}

/*
 * Calculate guest's supported XCR0 taking into account guest CPUID data and
 * KVM's supported XCR0 (comprised of host's XCR0 and KVM_SUPPORTED_XCR0).
 */
static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
{
        struct kvm_cpuid_entry2 *best;

        best = cpuid_entry2_find(entries, nent, 0xd, 0);
        if (!best)
                return 0;

        return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
}

static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
                                       int nent)
{
        struct kvm_cpuid_entry2 *best;
        struct kvm_hypervisor_cpuid kvm_cpuid;

        best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
        if (best) {
                /* Update OSXSAVE bit */
                if (boot_cpu_has(X86_FEATURE_XSAVE))
                        cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
                                           kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));

                cpuid_entry_change(best, X86_FEATURE_APIC,
                                   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
        }

        best = cpuid_entry2_find(entries, nent, 7, 0);
        if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
                cpuid_entry_change(best, X86_FEATURE_OSPKE,
                                   kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));

        best = cpuid_entry2_find(entries, nent, 0xD, 0);
        if (best)
                best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

        best = cpuid_entry2_find(entries, nent, 0xD, 1);
        if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
                     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

        kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
        if (kvm_cpuid.base) {
                best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
                if (kvm_hlt_in_guest(vcpu->kvm) && best)
                        best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
        }

        if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
                best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
                if (best)
                        cpuid_entry_change(best, X86_FEATURE_MWAIT,
                                           vcpu->arch.ia32_misc_enable_msr &
                                           MSR_IA32_MISC_ENABLE_MWAIT);
        }
}

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
        __kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
}
EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);

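/*
 * Illustrative sketch (not compiled, simplified): the runtime bits mirror
 * guest state.  E.g. once the guest sets CR4.OSXSAVE, a refresh sets OSXSAVE
 * in the guest's CPUID.0x1.ECX; in KVM proper the refresh is driven from the
 * CR4 write path rather than being called by hand as below.
 */
#if 0
        kvm_set_cr4(vcpu, kvm_read_cr4(vcpu) | X86_CR4_OSXSAVE);
        kvm_update_cpuid_runtime(vcpu);         /* CPUID.0x1.ECX[27] is now 1 */
#endif
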
static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
{
#ifdef CONFIG_KVM_HYPERV
        struct kvm_cpuid_entry2 *entry;

        entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
                                  KVM_CPUID_INDEX_NOT_SIGNIFICANT);
        return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
#else
        return false;
#endif
}

static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        struct kvm_cpuid_entry2 *best;
        bool allow_gbpages;

        BUILD_BUG_ON(KVM_NR_GOVERNED_FEATURES > KVM_MAX_NR_GOVERNED_FEATURES);
        bitmap_zero(vcpu->arch.governed_features.enabled,
                    KVM_MAX_NR_GOVERNED_FEATURES);

        /*
         * If TDP is enabled, let the guest use GBPAGES if they're supported in
         * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
         * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
         * walk for performance and complexity reasons.  Not to mention KVM
         * _can't_ solve the problem because GVA->GPA walks aren't visible to
         * KVM once a TDP translation is installed.  Mimic hardware behavior so
         * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
         * If TDP is disabled, honor *only* guest CPUID as KVM has full control
         * and can install smaller shadow pages if the host lacks 1GiB support.
         */
        allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
                                      guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
        if (allow_gbpages)
                kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES);

        best = kvm_find_cpuid_entry(vcpu, 1);
        if (best && apic) {
                if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
                        apic->lapic_timer.timer_mode_mask = 3 << 17;
                else
                        apic->lapic_timer.timer_mode_mask = 1 << 17;

                kvm_apic_set_version(vcpu);
        }

        vcpu->arch.guest_supported_xcr0 =
                cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);

        kvm_update_pv_runtime(vcpu);

        vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
        vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

        kvm_pmu_refresh(vcpu);
        vcpu->arch.cr4_guest_rsvd_bits =
                __cr4_reserved_bits(guest_cpuid_has, vcpu);

        kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
                                                    vcpu->arch.cpuid_nent));

        /* Invoke the vendor callback only after the above state is updated. */
        static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);

        /*
         * The MMU is refreshed last, as it needs to consume any
         * vendor-specific adjustments to the reserved GPA bits made by the
         * callback above.
         */
        kvm_mmu_after_set_cpuid(vcpu);
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000000);
        if (!best || best->eax < 0x80000008)
                goto not_found;
        best = kvm_find_cpuid_entry(vcpu, 0x80000008);
        if (best)
                return best->eax & 0xff;
not_found:
        /* Fall back to 36 bits, the architectural minimum with PAE paging. */
        return 36;
}

/*
 * This "raw" version returns the reserved GPA bits without any adjustments for
 * encryption technologies that usurp bits.  The raw mask should be used if and
 * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
 */
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
{
        return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
}

static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
                         int nent)
{
        int r;

        __kvm_update_cpuid_runtime(vcpu, e2, nent);

        /*
         * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
         * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
         * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
         * faults due to reusing SPs/SPTEs.  In practice no sane VMM mucks with
         * the core vCPU model on the fly.  It would've been better to forbid
         * any KVM_SET_CPUID{,2} calls after KVM_RUN altogether, but
         * unfortunately some VMMs (e.g. QEMU) reuse vCPU fds for CPU
         * hotplug/unplug and do KVM_SET_CPUID{,2} again.  To support this
         * legacy behavior, check whether the supplied CPUID data is equal to
         * what's already set.
         */
        if (kvm_vcpu_has_run(vcpu)) {
                r = kvm_cpuid_check_equal(vcpu, e2, nent);
                if (r)
                        return r;

                kvfree(e2);
                return 0;
        }

#ifdef CONFIG_KVM_HYPERV
        if (kvm_cpuid_has_hyperv(e2, nent)) {
                r = kvm_hv_vcpu_init(vcpu);
                if (r)
                        return r;
        }
#endif

        r = kvm_check_cpuid(vcpu, e2, nent);
        if (r)
                return r;

        kvfree(vcpu->arch.cpuid_entries);
        vcpu->arch.cpuid_entries = e2;
        vcpu->arch.cpuid_nent = nent;

        vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
#ifdef CONFIG_KVM_XEN
        vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
#endif
        kvm_vcpu_after_set_cpuid(vcpu);

        return 0;
}

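/*
 * Illustrative sketch (not compiled): the legacy behavior tolerated above,
 * from userspace's point of view.  "run" and "set_cpuid2" are shorthand for
 * ioctl(vcpu_fd, KVM_RUN, ...) and ioctl(vcpu_fd, KVM_SET_CPUID2, ...);
 * error handling is omitted.
 */
#if 0
        set_cpuid2(vcpu_fd, &cpuid);    /* initial model, accepted */
        run(vcpu_fd);
        set_cpuid2(vcpu_fd, &cpuid);    /* identical data, still accepted */
        cpuid.entries[0].eax ^= 1;
        set_cpuid2(vcpu_fd, &cpuid);    /* differs after KVM_RUN -> -EINVAL */
#endif
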
/*
 * Legacy ioctl: old userspace supplies an array of "struct kvm_cpuid_entry",
 * which lacks the index/flags fields; convert it to "struct kvm_cpuid_entry2".
 */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *e = NULL;
        struct kvm_cpuid_entry2 *e2 = NULL;

        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                return -E2BIG;

        if (cpuid->nent) {
                e = vmemdup_array_user(entries, cpuid->nent, sizeof(*e));
                if (IS_ERR(e))
                        return PTR_ERR(e);

                e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
                if (!e2) {
                        r = -ENOMEM;
                        goto out_free_cpuid;
                }
        }
        for (i = 0; i < cpuid->nent; i++) {
                e2[i].function = e[i].function;
                e2[i].eax = e[i].eax;
                e2[i].ebx = e[i].ebx;
                e2[i].ecx = e[i].ecx;
                e2[i].edx = e[i].edx;
                e2[i].index = 0;
                e2[i].flags = 0;
                e2[i].padding[0] = 0;
                e2[i].padding[1] = 0;
                e2[i].padding[2] = 0;
        }

        r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
        if (r)
                kvfree(e2);

out_free_cpuid:
        kvfree(e);

        return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        struct kvm_cpuid_entry2 *e2 = NULL;
        int r;

        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                return -E2BIG;

        if (cpuid->nent) {
                e2 = vmemdup_array_user(entries, cpuid->nent, sizeof(*e2));
                if (IS_ERR(e2))
                        return PTR_ERR(e2);
        }

        r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
        if (r)
                kvfree(e2);

        return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                return -E2BIG;

        if (copy_to_user(entries, vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                return -EFAULT;

        cpuid->nent = vcpu->arch.cpuid_nent;
        return 0;
}

/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
{
        const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
        struct kvm_cpuid_entry2 entry;

        reverse_cpuid_check(leaf);

        cpuid_count(cpuid.function, cpuid.index,
                    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

        kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}

static __always_inline
void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
{
        /* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */
        BUILD_BUG_ON(leaf < NCAPINTS);

        kvm_cpu_caps[leaf] = mask;

        __kvm_cpu_cap_mask(leaf);
}

static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
        /* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */
        BUILD_BUG_ON(leaf >= NCAPINTS);

        kvm_cpu_caps[leaf] &= mask;

        __kvm_cpu_cap_mask(leaf);
}

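/*
 * Illustrative sketch (not compiled): the split between the two helpers.
 * Hardware-defined leafs AND a mask into the caps copied from boot_cpu_data,
 * KVM-defined leafs start from zero and take the mask as-is; both are then
 * clamped by raw host CPUID.  The leafs/features below are arbitrary picks.
 */
#if 0
        kvm_cpu_cap_mask(CPUID_1_ECX, F(XSAVE) | F(AVX));               /* hardware leaf */
        kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX, F(AVX_VNNI_INT8));  /* KVM-only leaf */
#endif
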
void kvm_set_cpu_caps(void)
{
#ifdef CONFIG_X86_64
        unsigned int f_gbpages = F(GBPAGES);
        unsigned int f_lm = F(LM);
        unsigned int f_xfd = F(XFD);
#else
        unsigned int f_gbpages = 0;
        unsigned int f_lm = 0;
        unsigned int f_xfd = 0;
#endif
        memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));

        BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
                     sizeof(boot_cpu_data.x86_capability));

        memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
               sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));

        kvm_cpu_cap_mask(CPUID_1_ECX,
                /*
                 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
                 * advertised to guests via CPUID!
                 */
                F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
                0 /* DS-CPL, VMX, SMX, EST */ |
                0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
                F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
                F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
                0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
                F(F16C) | F(RDRAND)
        );
        /* KVM emulates x2apic in software irrespective of host support. */
        kvm_cpu_cap_set(X86_FEATURE_X2APIC);

        kvm_cpu_cap_mask(CPUID_1_EDX,
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
                0 /* Reserved, DS, ACPI */ | F(MMX) |
                F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
                0 /* HTT, TM, Reserved, PBE */
        );

        kvm_cpu_cap_mask(CPUID_7_0_EBX,
                F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
                F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
                F(RTM) | F(ZERO_FCS_FDS) | 0 /* MPX */ | F(AVX512F) |
                F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
                F(CLFLUSHOPT) | F(CLWB) | 0 /* INTEL_PT */ | F(AVX512PF) |
                F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
                F(AVX512VL));

        kvm_cpu_cap_mask(CPUID_7_ECX,
                F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /* OSPKE */ | F(RDPID) |
                F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
                F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
                F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /* WAITPKG */ |
                F(SGX_LC) | F(BUS_LOCK_DETECT)
        );
        /* Set LA57 based on hardware capability. */
        if (cpuid_ecx(7) & F(LA57))
                kvm_cpu_cap_set(X86_FEATURE_LA57);

        /*
         * PKU is not yet implemented for shadow paging and requires OSPKE to
         * be set on the host.  Clear the cap if either condition isn't met.
         */
        if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
                kvm_cpu_cap_clear(X86_FEATURE_PKU);

        kvm_cpu_cap_mask(CPUID_7_EDX,
                F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
                F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
                F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
                F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
                F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D)
        );

        /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
        kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
        kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);

        if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
                kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
        if (boot_cpu_has(X86_FEATURE_STIBP))
                kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
        if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);

        kvm_cpu_cap_mask(CPUID_7_1_EAX,
                F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) |
                F(FZRM) | F(FSRS) | F(FSRC) |
                F(AMX_FP16) | F(AVX_IFMA) | F(LAM)
        );

        kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
                F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) |
                F(AMX_COMPLEX)
        );

        kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
                F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
                F(BHI_CTRL) | F(MCDT_NO)
        );

        kvm_cpu_cap_mask(CPUID_D_1_EAX,
                F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
        );

        kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
                SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
        );

        kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
                F(LAHF_LM) | F(CMP_LEGACY) | 0 /* SVM */ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
                0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
                F(TOPOEXT) | 0 /* PERFCTR_CORE */
        );

        kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
                F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
        );

        if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
                kvm_cpu_cap_set(X86_FEATURE_GBPAGES);

        kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX,
                SF(CONSTANT_TSC)
        );

        kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
                F(CLZERO) | F(XSAVEERPTR) |
                F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
                F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
                F(AMD_PSFD)
        );

        /*
         * AMD has separate bits for each SPEC_CTRL bit.
         * arch/x86/kernel/cpu/bugs.c is kind enough to
         * record that in cpufeatures, so use them.
         */
        if (boot_cpu_has(X86_FEATURE_IBPB))
                kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
        if (boot_cpu_has(X86_FEATURE_IBRS))
                kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
        if (boot_cpu_has(X86_FEATURE_STIBP))
                kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
        if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
        /*
         * The preference is to use the SPEC_CTRL MSR instead of the
         * VIRT_SPEC MSR.
         */
        if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

        /*
         * Hide all SVM features by default, SVM will set the cap bits for
         * features it emulates and/or exposes for L1.
         */
        kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);

        kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
                0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
                F(SME_COHERENT));

        kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
                F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
                F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ |
                F(WRMSR_XX_BASE_NS)
        );

        kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);
        kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE);
        kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO);

        kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
                F(PERFMON_V2)
        );

        /*
         * Synthesize "LFENCE is serializing" into the AMD-defined entry in
         * KVM's supported CPUID if the feature is reported as supported by the
         * kernel.  LFENCE_RDTSC was a Linux-defined synthetic feature long
         * before AMD joined the bandwagon, e.g. LFENCE is serializing on most
         * CPUs that support SSE2.  On CPUs that don't support AMD's leaf,
         * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing
         * the mask with the raw host CPUID, and reporting support in AMD's
         * leaf can make it easier for userspace to detect the feature.
         */
        if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
                kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC);
        if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
                kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE);
        kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR);

        kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
                F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
                F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
                F(PMM) | F(PMM_EN)
        );

        /*
         * Hide RDTSCP and RDPID if either feature is reported as supported but
         * probing MSR_TSC_AUX failed.  This is purely a sanity check and
         * should never happen, but the guest will likely crash if RDTSCP or
         * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
         * the past.  For example, the sanity check may fire if this instance
         * of KVM is running as L1 on top of an older, broken KVM.
         */
        if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
                     kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
                    !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
                kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
                kvm_cpu_cap_clear(X86_FEATURE_RDPID);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);

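/*
 * Illustrative sketch (not compiled): userspace's view of the caps computed
 * above, via the KVM_GET_SUPPORTED_CPUID system ioctl.  Error handling and
 * the usual E2BIG-resize loop are omitted; "kvm_fd" is an open /dev/kvm fd.
 */
#if 0
        struct {
                struct kvm_cpuid2 hdr;
                struct kvm_cpuid_entry2 ent[256];
        } c = { .hdr.nent = 256 };

        ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, &c.hdr);
        /* c.ent[] now holds the masked leafs, ready for KVM_SET_CPUID2. */
#endif
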
struct kvm_cpuid_array {
        struct kvm_cpuid_entry2 *entries;
        int maxnent;
        int nent;
};

static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
{
        if (array->nent >= array->maxnent)
                return NULL;

        return &array->entries[array->nent++];
}

static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
                                              u32 function, u32 index)
{
        struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);

        if (!entry)
                return NULL;

        memset(entry, 0, sizeof(*entry));
        entry->function = function;
        entry->index = index;
        switch (function & 0xC0000000) {
        case 0x40000000:
                /* Hypervisor leaves are always synthesized by __do_cpuid_func. */
                return entry;

        case 0x80000000:
                /*
                 * 0x80000021 is sometimes synthesized by __do_cpuid_func, which
                 * would result in out-of-bounds calls to do_host_cpuid.
                 */
                {
                        static int max_cpuid_80000000;

                        if (!READ_ONCE(max_cpuid_80000000))
                                WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000));
                        if (function > READ_ONCE(max_cpuid_80000000))
                                return entry;
                }
                break;

        default:
                break;
        }

        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

        if (cpuid_function_is_indexed(function))
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;

        return entry;
}

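/*
 * Illustrative sketch (not compiled): how the switch above classifies
 * functions by their top two bits, i.e. (function & 0xC0000000).
 */
#if 0
        BUILD_BUG_ON((0x00000002 & 0xC0000000) != 0x00000000);  /* basic: host CPUID */
        BUILD_BUG_ON((0x40000001 & 0xC0000000) != 0x40000000);  /* hypervisor: synthesized */
        BUILD_BUG_ON((0x80000005 & 0xC0000000) != 0x80000000);  /* extended: bounds-checked */
        BUILD_BUG_ON((0xC0000001 & 0xC0000000) != 0xC0000000);  /* Centaur */
#endif
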
static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
        struct kvm_cpuid_entry2 *entry;

        if (array->nent >= array->maxnent)
                return -E2BIG;

        entry = &array->entries[array->nent];
        entry->function = func;
        entry->index = 0;
        entry->flags = 0;

        switch (func) {
        case 0:
                entry->eax = 7;
                ++array->nent;
                break;
        case 1:
                entry->ecx = F(MOVBE);
                ++array->nent;
                break;
        case 7:
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                entry->eax = 0;
                if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
                        entry->ecx = F(RDPID);
                ++array->nent;
                break;
        default:
                break;
        }

        return 0;
}

static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
        struct kvm_cpuid_entry2 *entry;
        int r, i, max_idx;

        /* All calls to cpuid_count() should be made on the same cpu. */
        get_cpu();

        r = -E2BIG;

        entry = do_host_cpuid(array, function, 0);
        if (!entry)
                goto out;

        switch (function) {
        case 0:
                /* Limited to the highest leaf implemented in KVM. */
                entry->eax = min(entry->eax, 0x1fU);
                break;
        case 1:
                cpuid_entry_override(entry, CPUID_1_EDX);
                cpuid_entry_override(entry, CPUID_1_ECX);
                break;
        case 2:
                /*
                 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
                 * CPUID(function=2, index=0) may return different results each
                 * time, with the least-significant byte in EAX enumerating the
                 * number of times software should do CPUID(2, 0).
                 *
                 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
                 * idiotic.  Intel's SDM states that EAX & 0xff "will always
                 * return 01H. Software should ignore this value and not
                 * interpret it as an informational descriptor", while AMD's
                 * APM states that CPUID(2) is reserved.
                 *
                 * WARN if a Frankenstein CPU that supports virtualization and
                 * a stateful CPUID.0x2 is encountered.
                 */
                WARN_ON_ONCE((entry->eax & 0xff) > 1);
                break;
        /* Functions 4 and 0x8000001d have additional index. */
        case 4:
        case 0x8000001d:
                /*
                 * Read entries until the cache type in the previous entry is
                 * zero, i.e. indicates an invalid entry.
                 */
                for (i = 1; entry->eax & 0x1f; ++i) {
                        entry = do_host_cpuid(array, function, i);
                        if (!entry)
                                goto out;
                }
                break;
        case 6: /* Thermal management */
                entry->eax = 0x4; /* allow ARAT */
                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        /* Function 7 has additional index. */
        case 7:
                max_idx = entry->eax = min(entry->eax, 2u);
                cpuid_entry_override(entry, CPUID_7_0_EBX);
                cpuid_entry_override(entry, CPUID_7_ECX);
                cpuid_entry_override(entry, CPUID_7_EDX);

                /* KVM only supports up to 0x7.2, capped above via min(). */
                if (max_idx >= 1) {
                        entry = do_host_cpuid(array, function, 1);
                        if (!entry)
                                goto out;

                        cpuid_entry_override(entry, CPUID_7_1_EAX);
                        cpuid_entry_override(entry, CPUID_7_1_EDX);
                        entry->ebx = 0;
                        entry->ecx = 0;
                }
                if (max_idx >= 2) {
                        entry = do_host_cpuid(array, function, 2);
                        if (!entry)
                                goto out;

                        cpuid_entry_override(entry, CPUID_7_2_EDX);
                        entry->ecx = 0;
                        entry->ebx = 0;
                        entry->eax = 0;
                }
                break;
        case 0xa: { /* Architectural Performance Monitoring */
                union cpuid10_eax eax;
                union cpuid10_edx edx;

                if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }

                eax.split.version_id = kvm_pmu_cap.version;
                eax.split.num_counters = kvm_pmu_cap.num_counters_gp;
                eax.split.bit_width = kvm_pmu_cap.bit_width_gp;
                eax.split.mask_length = kvm_pmu_cap.events_mask_len;
                edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed;
                edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed;

                if (kvm_pmu_cap.version)
                        edx.split.anythread_deprecated = 1;
                edx.split.reserved1 = 0;
                edx.split.reserved2 = 0;

                entry->eax = eax.full;
                entry->ebx = kvm_pmu_cap.events_mask;
                entry->ecx = 0;
                entry->edx = edx.full;
                break;
        }
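        /*
         * Illustrative sketch (not compiled): the EAX layout assembled above
         * via union cpuid10_eax, per the SDM's architectural PMU leaf.  The
         * variable names are placeholders; the shift/mask view is equivalent
         * to the bitfield writes.
         */
#if 0
        eax = (version      <<  0) |    /* EAX[ 7: 0]: PMU version */
              (nr_gp_ctrs   <<  8) |    /* EAX[15: 8]: # of GP counters */
              (gp_bit_width << 16) |    /* EAX[23:16]: GP counter width */
              (evt_mask_len << 24);     /* EAX[31:24]: event mask length */
#endif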
        case 0x1f:
        case 0xb:
                /*
                 * No topology; a valid topology is indicated by the presence
                 * of subleaf 1.
                 */
                entry->eax = entry->ebx = entry->ecx = 0;
                break;
        case 0xd: {
                u64 permitted_xcr0 = kvm_get_filtered_xcr0();
                u64 permitted_xss = kvm_caps.supported_xss;

                entry->eax &= permitted_xcr0;
                entry->ebx = xstate_required_size(permitted_xcr0, false);
                entry->ecx = entry->ebx;
                entry->edx &= permitted_xcr0 >> 32;
                if (!permitted_xcr0)
                        break;

                entry = do_host_cpuid(array, function, 1);
                if (!entry)
                        goto out;

                cpuid_entry_override(entry, CPUID_D_1_EAX);
                if (entry->eax & (F(XSAVES) | F(XSAVEC))) {
                        entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
                                                          true);
                } else {
                        WARN_ON_ONCE(permitted_xss != 0);
                        entry->ebx = 0;
                }
                entry->ecx &= permitted_xss;
                entry->edx &= permitted_xss >> 32;

                for (i = 2; i < 64; ++i) {
                        bool s_state;

                        if (permitted_xcr0 & BIT_ULL(i))
                                s_state = false;
                        else if (permitted_xss & BIT_ULL(i))
                                s_state = true;
                        else
                                continue;

                        entry = do_host_cpuid(array, function, i);
                        if (!entry)
                                goto out;

                        /*
                         * The supported check above should have filtered out
                         * invalid sub-leafs.  Only valid sub-leafs should
                         * reach this point, and they should have a non-zero
                         * save state size.  Furthermore, check whether the
                         * processor agrees with permitted_xcr0/permitted_xss
                         * on whether this is an XCR0- or IA32_XSS-managed area.
                         */
                        if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
                                --array->nent;
                                continue;
                        }

                        if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
                                entry->ecx &= ~BIT_ULL(2);
                        entry->edx = 0;
                }
                break;
        }
        case 0x12:
                /* Intel SGX */
                if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }

                /*
                 * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
                 * and max enclave sizes.  The SGX sub-features and MISCSELECT
                 * are restricted by kernel and KVM capabilities (like most
                 * feature flags), while enclave size is unrestricted.
                 */
                cpuid_entry_override(entry, CPUID_12_EAX);
                entry->ebx &= SGX_MISC_EXINFO;

                entry = do_host_cpuid(array, function, 1);
                if (!entry)
                        goto out;

                /*
                 * Index 1: SECS.ATTRIBUTES.  ATTRIBUTES are restricted a la
                 * feature flags.  Advertise all supported flags, including
                 * privileged attributes that require explicit opt-in from
                 * userspace.  ATTRIBUTES.XFRM is not adjusted as userspace is
                 * expected to derive it from supported XCR0.
                 */
                entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;
                entry->ebx = 0;
                break;
        /* Intel PT */
        case 0x14:
                if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }

                for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
                        if (!do_host_cpuid(array, function, i))
                                goto out;
                }
                break;
        /* Intel AMX TILE */
        case 0x1d:
                if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }

                for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
                        if (!do_host_cpuid(array, function, i))
                                goto out;
                }
                break;
        case 0x1e: /* TMUL information */
                if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }
                break;
        case KVM_CPUID_SIGNATURE: {
                const u32 *sigptr = (const u32 *)KVM_SIGNATURE;

                entry->eax = KVM_CPUID_FEATURES;
                entry->ebx = sigptr[0];
                entry->ecx = sigptr[1];
                entry->edx = sigptr[2];
                break;
        }
        case KVM_CPUID_FEATURES:
                entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
                             (1 << KVM_FEATURE_NOP_IO_DELAY) |
                             (1 << KVM_FEATURE_CLOCKSOURCE2) |
                             (1 << KVM_FEATURE_ASYNC_PF) |
                             (1 << KVM_FEATURE_PV_EOI) |
                             (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
                             (1 << KVM_FEATURE_PV_UNHALT) |
                             (1 << KVM_FEATURE_PV_TLB_FLUSH) |
                             (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
                             (1 << KVM_FEATURE_PV_SEND_IPI) |
                             (1 << KVM_FEATURE_POLL_CONTROL) |
                             (1 << KVM_FEATURE_PV_SCHED_YIELD) |
                             (1 << KVM_FEATURE_ASYNC_PF_INT);

                if (sched_info_on())
                        entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        case 0x80000000:
                entry->eax = min(entry->eax, 0x80000022);
                /*
                 * Serializing LFENCE is reported in a multitude of ways, and
                 * NullSegClearsBase is not reported in CPUID on Zen2; help
                 * userspace by providing the CPUID leaf ourselves.
                 *
                 * However, only do it if the host has CPUID leaf 0x8000001d.
                 * QEMU thinks that it can query the host blindly for that
                 * CPUID leaf if KVM reports that it supports 0x8000001d or
                 * above.  The processor merrily returns values from the
                 * highest Intel leaf which QEMU tries to use as the guest's
                 * 0x8000001d.  Even worse, this can result in an infinite
                 * loop if said highest leaf has no subleaves indexed by ECX.
                 */
                if (entry->eax >= 0x8000001d &&
                    (static_cpu_has(X86_FEATURE_LFENCE_RDTSC) ||
                     !static_cpu_has_bug(X86_BUG_NULL_SEG)))
                        entry->eax = max(entry->eax, 0x80000021);
                break;
        case 0x80000001:
                entry->ebx &= ~GENMASK(27, 16);
                cpuid_entry_override(entry, CPUID_8000_0001_EDX);
                cpuid_entry_override(entry, CPUID_8000_0001_ECX);
                break;
        case 0x80000005:
                /* Pass host L1 cache and TLB info. */
                break;
        case 0x80000006:
                /* Drop reserved bits, pass host L2 cache and TLB info. */
                entry->edx &= ~GENMASK(17, 16);
                break;
        case 0x80000007: /* Advanced power management */
                cpuid_entry_override(entry, CPUID_8000_0007_EDX);

                /* Mask against host. */
                entry->edx &= boot_cpu_data.x86_power;
                entry->eax = entry->ebx = entry->ecx = 0;
                break;
        case 0x80000008: {
                unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U);
                unsigned int phys_as;

                /*
                 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
                 * the guest operates in the same PA space as the host, i.e.
                 * reductions in MAXPHYADDR for memory encryption affect shadow
                 * paging, too.
                 *
                 * If TDP is enabled, use the raw bare metal MAXPHYADDR as
                 * reductions to the HPAs do not affect GPAs.
                 */
                if (!tdp_enabled)
                        phys_as = boot_cpu_data.x86_phys_bits;
                else
                        phys_as = entry->eax & 0xff;

                entry->eax = phys_as | (virt_as << 8);
                entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
                entry->edx = 0;
                cpuid_entry_override(entry, CPUID_8000_0008_EBX);
                break;
        }
        case 0x8000000A:
                if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }
                entry->eax = 1; /* SVM revision 1 */
                entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
                                   ASID emulation to nested SVM. */
                entry->ecx = 0; /* Reserved */
                cpuid_entry_override(entry, CPUID_8000_000A_EDX);
                break;
        case 0x80000019:
                entry->ecx = entry->edx = 0;
                break;
        case 0x8000001a:
                entry->eax &= GENMASK(2, 0);
                entry->ebx = entry->ecx = entry->edx = 0;
                break;
        case 0x8000001e:
                /* Do not return host topology information. */
                entry->eax = entry->ebx = entry->ecx = 0;
                entry->edx = 0; /* reserved */
                break;
        case 0x8000001F:
                if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                } else {
                        cpuid_entry_override(entry, CPUID_8000_001F_EAX);
                        /* Clear NumVMPL since KVM does not support VMPL. */
                        entry->ebx &= ~GENMASK(31, 12);
                        /*
                         * Enumerate '0' for "PA bits reduction", the adjusted
                         * MAXPHYADDR is enumerated directly (see 0x80000008).
                         */
                        entry->ebx &= ~GENMASK(11, 6);
                }
                break;
        case 0x80000020:
                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                break;
        case 0x80000021:
                entry->ebx = entry->ecx = entry->edx = 0;
                cpuid_entry_override(entry, CPUID_8000_0021_EAX);
                break;
        /* AMD Extended Performance Monitoring and Debug */
        case 0x80000022: {
                union cpuid_0x80000022_ebx ebx = { };

                entry->ecx = entry->edx = 0;
                if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
                        /* Zero both EAX and EBX, don't leak raw host data. */
                        entry->eax = entry->ebx = 0;
                        break;
                }

                cpuid_entry_override(entry, CPUID_8000_0022_EAX);

                if (kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
                        ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp;
                else if (kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
                        ebx.split.num_core_pmc = AMD64_NUM_COUNTERS_CORE;
                else
                        ebx.split.num_core_pmc = AMD64_NUM_COUNTERS;

                entry->ebx = ebx.full;
                break;
        }
        /* Add support for Centaur's CPUID instruction. */
        case 0xC0000000:
                /* Just support up to 0xC0000004 now. */
                entry->eax = min(entry->eax, 0xC0000004);
                break;
        case 0xC0000001:
                cpuid_entry_override(entry, CPUID_C000_0001_EDX);
                break;
        case 3: /* Processor serial number */
        case 5: /* MONITOR/MWAIT */
        case 0xC0000002:
        case 0xC0000003:
        case 0xC0000004:
        default:
                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                break;
        }

        r = 0;

out:
        put_cpu();

        return r;
}

static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
                         unsigned int type)
{
        if (type == KVM_GET_EMULATED_CPUID)
                return __do_cpuid_func_emulated(array, func);

        return __do_cpuid_func(array, func);
}

#define CENTAUR_CPUID_SIGNATURE 0xC0000000

static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
                          unsigned int type)
{
        u32 limit;
        int r;

        if (func == CENTAUR_CPUID_SIGNATURE &&
            boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
                return 0;

        r = do_cpuid_func(array, func, type);
        if (r)
                return r;

        limit = array->entries[array->nent - 1].eax;
        for (func = func + 1; func <= limit; ++func) {
                r = do_cpuid_func(array, func, type);
                if (r)
                        break;
        }

        return r;
}

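/*
 * Illustrative sketch (not compiled): what the enumeration above produces for
 * each base function, assuming a host whose basic limit (CPUID.0.EAX) is
 * 0x1f.  The order matches the funcs[] array used by kvm_dev_ioctl_get_cpuid()
 * below.
 */
#if 0
        get_cpuid_func(array, 0, type);          /* emits leafs 0x0 .. 0x1f */
        get_cpuid_func(array, 0x80000000, type); /* emits 0x80000000 .. its limit */
        get_cpuid_func(array, 0xC0000000, type); /* skipped on non-Centaur hosts */
        get_cpuid_func(array, 0x40000000, type); /* KVM's own hypervisor leafs */
#endif
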
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
                                 __u32 num_entries, unsigned int ioctl_type)
{
        int i;
        __u32 pad[3];

        if (ioctl_type != KVM_GET_EMULATED_CPUID)
                return false;

        /*
         * We want to make sure that ->padding is being passed clean from
         * userspace in case we want to use it for something in the future.
         *
         * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we have
         * to settle for enforcing it only on the emulated side.  /me sheds a
         * tear.
         */
        for (i = 0; i < num_entries; i++) {
                if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
                        return true;

                if (pad[0] || pad[1] || pad[2])
                        return true;
        }
        return false;
}

int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type)
{
        static const u32 funcs[] = {
                0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
        };

        struct kvm_cpuid_array array = {
                .nent = 0,
        };
        int r, i;

        if (cpuid->nent < 1)
                return -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                cpuid->nent = KVM_MAX_CPUID_ENTRIES;

        if (sanity_check_entries(entries, cpuid->nent, type))
                return -EINVAL;

        array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
        if (!array.entries)
                return -ENOMEM;

        array.maxnent = cpuid->nent;

        for (i = 0; i < ARRAY_SIZE(funcs); i++) {
                r = get_cpuid_func(&array, funcs[i], type);
                if (r)
                        goto out_free;
        }
        cpuid->nent = array.nent;

        if (copy_to_user(entries, array.entries,
                         array.nent * sizeof(struct kvm_cpuid_entry2)))
                r = -EFAULT;

out_free:
        kvfree(array.entries);
        return r;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
                                                    u32 function, u32 index)
{
        return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
                                 function, index);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function)
{
        return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
                                 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested.  AMD CPUID semantics
 * returns all zeroes for any undefined leaf, whether or not the leaf is in
 * range.  Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive.  A
 * primary class exists if a guest CPUID entry for its <base> leaf exists.
 * For a given class, CPUID.<base>.EAX contains the max supported leaf for
 * the class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a 0x100 byte range.  E.g. if Qemu
 * is advertising support for both HyperV and KVM, the resulting Hypervisor
 * CPUID sub-classes are:
 *
 *  - HyperV: 0x40000000 - 0x400000ff
 *  - KVM:    0x40000100 - 0x400001ff
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
        struct kvm_cpuid_entry2 *basic, *class;
        u32 function = *fn_ptr;

        basic = kvm_find_cpuid_entry(vcpu, 0);
        if (!basic)
                return NULL;

        if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
            is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
                return NULL;

        if (function >= 0x40000000 && function <= 0x4fffffff)
                class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00);
        else if (function >= 0xc0000000)
                class = kvm_find_cpuid_entry(vcpu, 0xc0000000);
        else
                class = kvm_find_cpuid_entry(vcpu, function & 0x80000000);

        if (class && function <= class->eax)
                return NULL;

        /*
         * Leaf specific adjustments are also applied when redirecting to the
         * max basic entry, e.g. if the max basic leaf is 0xb but there is no
         * entry for CPUID.0xb.index (see below), then the output value for EDX
         * needs to be pulled from CPUID.0xb.1.
         */
        *fn_ptr = basic->eax;

        /*
         * The class does not exist or the requested function is out of range;
         * the effective CPUID entry is the max basic leaf.  Note, the index of
         * the original requested leaf is observed!
         */
        return kvm_find_cpuid_entry_index(vcpu, basic->eax, index);
}

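/*
 * Illustrative sketch (not compiled): Intel-style out-of-range redirection.
 * Assume the guest's max basic leaf (CPUID.0.EAX) is 0x1f and the guest asks
 * for leaf 0x12345678, which exceeds its class's limit: the output is pulled
 * from the guest's 0x1f entry, with the originally requested index preserved.
 */
#if 0
        u32 fn = 0x12345678;

        entry = get_out_of_range_cpuid_entry(vcpu, &fn, 1);
        /* fn is now 0x1f; entry is the guest's CPUID.0x1f.1 entry, if any. */
#endif
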
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
               u32 *ecx, u32 *edx, bool exact_only)
{
        u32 orig_function = *eax, function = *eax, index = *ecx;
        struct kvm_cpuid_entry2 *entry;
        bool exact, used_max_basic = false;

        entry = kvm_find_cpuid_entry_index(vcpu, function, index);
        exact = !!entry;

        if (!entry && !exact_only) {
                entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
                used_max_basic = !!entry;
        }

        if (entry) {
                *eax = entry->eax;
                *ebx = entry->ebx;
                *ecx = entry->ecx;
                *edx = entry->edx;
                if (function == 7 && index == 0) {
                        u64 data;

                        if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
                            (data & TSX_CTRL_CPUID_CLEAR))
                                *ebx &= ~(F(RTM) | F(HLE));
                } else if (function == 0x80000007) {
                        if (kvm_hv_invtsc_suppressed(vcpu))
                                *edx &= ~SF(CONSTANT_TSC);
                }
        } else {
                *eax = *ebx = *ecx = *edx = 0;
                /*
                 * When leaf 0BH or 1FH is defined, CL is pass-through
                 * and EDX is always the x2APIC ID, even for undefined
                 * subleaves.  Index 1 will exist iff the leaf is
                 * implemented, so we pass through CL iff index 1
                 * exists.  EDX can be copied from any existing index.
                 */
                if (function == 0xb || function == 0x1f) {
                        entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
                        if (entry) {
                                *ecx = index & 0xff;
                                *edx = entry->edx;
                        }
                }
        }
        trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
                        used_max_basic);
        return exact;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

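/*
 * Illustrative sketch (not compiled): the two caller contracts for
 * kvm_cpuid().  The leaf queried is an arbitrary example.
 */
#if 0
        u32 eax = 0x8000001f, ebx, ecx = 0, edx;

        /* Emulating guest CPUID: out-of-range redirection is applied. */
        kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);

        /* Internal probe: only an exact entry match may be consumed. */
        found = kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
#endif
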
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
        u32 eax, ebx, ecx, edx;

        if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
                return 1;

        eax = kvm_rax_read(vcpu);
        ecx = kvm_rcx_read(vcpu);
        kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
        kvm_rax_write(vcpu, eax);
        kvm_rbx_write(vcpu, ebx);
        kvm_rcx_write(vcpu, ecx);
        kvm_rdx_write(vcpu, edx);
        return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);