// SPDX-License-Identifier: GPL-2.0
/*
 * Test for VMX-pmu perf capability MSR
 *
 * Copyright (C) 2021 Intel Corporation
 *
 * Test the effect of various CPUID settings on the
 * MSR_IA32_PERF_CAPABILITIES MSR, verify that what we write with KVM_SET_MSR
 * is _not_ modified by the guest and can be read back with KVM_GET_MSR, and
 * test that invalid LBR formats are rejected.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <sys/ioctl.h>

#include <linux/bitmap.h>

#include "kvm_test_harness.h"
#include "kvm_util.h"
#include "vmx.h"

static union perf_capabilities {
	struct {
		u64 lbr_format:6;
		u64 pebs_trap:1;
		u64 pebs_arch_reg:1;
		u64 pebs_format:4;
		u64 smm_freeze:1;
		u64 full_width_write:1;
		u64 pebs_baseline:1;
		u64 perf_metrics:1;
		u64 pebs_output_pt_available:1;
		u64 anythread_deprecated:1;
	};
	u64 capabilities;
} host_cap;

/*
 * The LBR format and most PEBS features are immutable, all other features are
 * fungible (if supported by the host and KVM).
 */
static const union perf_capabilities immutable_caps = {
	.lbr_format = -1,
	.pebs_trap = 1,
	.pebs_arch_reg = 1,
	.pebs_format = -1,
	.pebs_baseline = 1,
};

static const union perf_capabilities format_caps = {
	.lbr_format = -1,
	.pebs_format = -1,
};

static void guest_test_perf_capabilities_gp(uint64_t val)
{
	uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val);

	__GUEST_ASSERT(vector == GP_VECTOR,
		       "Expected #GP for value '0x%lx', got vector '0x%x'",
		       val, vector);
}

static void guest_code(uint64_t current_val)
{
	int i;

	guest_test_perf_capabilities_gp(current_val);
	guest_test_perf_capabilities_gp(0);

	for (i = 0; i < 64; i++)
		guest_test_perf_capabilities_gp(current_val ^ BIT_ULL(i));

	GUEST_DONE();
}

KVM_ONE_VCPU_TEST_SUITE(vmx_pmu_caps);

/*
 * Verify that guest WRMSRs to PERF_CAPABILITIES #GP regardless of the value
 * written, that the guest always sees the userspace controlled value, and that
 * PERF_CAPABILITIES is immutable after KVM_RUN.
 */
KVM_ONE_VCPU_TEST(vmx_pmu_caps, guest_wrmsr_perf_capabilities, guest_code)
{
	struct ucall uc;
	int r, i;

	vm_init_descriptor_tables(vcpu->vm);
	vcpu_init_descriptor_tables(vcpu);

	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);

	vcpu_args_set(vcpu, 1, host_cap.capabilities);
	vcpu_run(vcpu);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_DONE:
		break;
	default:
		TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
	}

	TEST_ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES),
		       host_cap.capabilities);

	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);

	r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0);
	TEST_ASSERT(!r, "Post-KVM_RUN write '0' didn't fail");

	for (i = 0; i < 64; i++) {
		r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES,
				  host_cap.capabilities ^ BIT_ULL(i));
		TEST_ASSERT(!r, "Post-KVM_RUN write '0x%llx' didn't fail",
			    host_cap.capabilities ^ BIT_ULL(i));
	}
}

/*
 * Verify KVM allows writing PERF_CAPABILITIES with all KVM-supported features
 * enabled, as well as '0' (to disable all features).
 */
KVM_ONE_VCPU_TEST(vmx_pmu_caps, basic_perf_capabilities, guest_code)
{
	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0);
	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
}

KVM_ONE_VCPU_TEST(vmx_pmu_caps, fungible_perf_capabilities, guest_code)
{
	const uint64_t fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities;
	int bit;

	for_each_set_bit(bit, &fungible_caps, 64) {
		vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, BIT_ULL(bit));
		vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES,
			     host_cap.capabilities & ~BIT_ULL(bit));
	}
	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
}

/*
 * Verify KVM rejects attempts to set unsupported and/or immutable features in
 * PERF_CAPABILITIES.  Note, LBR format and PEBS format need to be validated
 * separately as they are multi-bit values, e.g. toggling or setting a single
 * bit can generate a false positive without dedicated safeguards.
 */
KVM_ONE_VCPU_TEST(vmx_pmu_caps, immutable_perf_capabilities, guest_code)
{
	const uint64_t reserved_caps = (~host_cap.capabilities |
					immutable_caps.capabilities) &
				       ~format_caps.capabilities;
	union perf_capabilities val = host_cap;
	int r, bit;

	for_each_set_bit(bit, &reserved_caps, 64) {
		r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES,
				  host_cap.capabilities ^ BIT_ULL(bit));
		TEST_ASSERT(!r, "%s immutable feature 0x%llx (bit %d) didn't fail",
			    host_cap.capabilities & BIT_ULL(bit) ? "Setting" : "Clearing",
			    BIT_ULL(bit), bit);
	}

	/*
	 * KVM only supports the host's native LBR format, as well as '0' (to
	 * disable LBR support).  Verify KVM rejects all other LBR formats.
	 */
	for (val.lbr_format = 1; val.lbr_format; val.lbr_format++) {
		if (val.lbr_format == host_cap.lbr_format)
			continue;

		r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val.capabilities);
		TEST_ASSERT(!r, "Bad LBR FMT = 0x%x didn't fail, host = 0x%x",
			    val.lbr_format, host_cap.lbr_format);
	}

	/* Ditto for the PEBS format. */
	for (val.pebs_format = 1; val.pebs_format; val.pebs_format++) {
		if (val.pebs_format == host_cap.pebs_format)
			continue;

		r = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val.capabilities);
		TEST_ASSERT(!r, "Bad PEBS FMT = 0x%x didn't fail, host = 0x%x",
			    val.pebs_format, host_cap.pebs_format);
	}
}

/*
 * Test that LBR MSRs are writable when LBRs are enabled, and then verify that
 * disabling the vPMU via CPUID also disables LBR support.
 * Set bits 2:0 of LBR_TOS as those bits are writable across all uarch
 * implementations (arch LBRs will need to poke a different MSR).
 */
KVM_ONE_VCPU_TEST(vmx_pmu_caps, lbr_perf_capabilities, guest_code)
{
	int r;

	if (!host_cap.lbr_format)
		return;

	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
	vcpu_set_msr(vcpu, MSR_LBR_TOS, 7);

	vcpu_clear_cpuid_entry(vcpu, X86_PROPERTY_PMU_VERSION.function);

	r = _vcpu_set_msr(vcpu, MSR_LBR_TOS, 7);
	TEST_ASSERT(!r, "Writing LBR_TOS should fail after disabling vPMU");
}

int main(int argc, char *argv[])
{
	TEST_REQUIRE(kvm_is_pmu_enabled());
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));

	TEST_REQUIRE(kvm_cpu_has_p(X86_PROPERTY_PMU_VERSION));
	TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);

	host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES);

	TEST_ASSERT(host_cap.full_width_write,
		    "Full-width writes should always be supported");

	return test_harness_run(argc, argv);
}