/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) \
	(((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)
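
/*
 * For example, fixed_ctrl_field(ctrl, 1) extracts bits 7:4 of
 * IA32_FIXED_CTR_CTRL, i.e. the enable (kernel/user) and PMI controls for
 * fixed counter 1, given the architectural 4-bit stride between the
 * per-counter control fields.
 */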

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED

struct kvm_pmu_emulated_event_selectors {
	u64 INSTRUCTIONS_RETIRED;
	u64 BRANCH_INSTRUCTIONS_RETIRED;
};

struct kvm_pmu_ops {
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*check_rdpmc_early)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);

	const u64 EVENTSEL_EVENT;
	const int MAX_NR_GP_COUNTERS;
	const int MIN_NR_GP_COUNTERS;
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	/*
	 * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
	 * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
	 * greater than zero.  However, KVM only exposes and emulates the MSR
	 * to/for the guest if the guest PMU supports at least "Architectural
	 * Performance Monitoring Version 2".
	 *
	 * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
	 */
	return pmu->version > 1;
}
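
/*
 * E.g. a guest whose vPMU is configured as version 1 is not given
 * PERF_GLOBAL_CTRL even though the SDM nominally allows it; exactly how an
 * access then faults is left to the vendor is_valid_msr()/get_msr()/set_msr()
 * implementations.
 */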

/*
 * KVM tracks all counters in 64-bit bitmaps, with general purpose counters
 * mapped to bits 31:0 and fixed counters mapped to 63:32, e.g. fixed counter 0
 * is tracked internally via index 32.  On Intel (AMD doesn't support fixed
 * counters), this mirrors how fixed counters are mapped to PERF_GLOBAL_CTRL
 * and similar MSRs, i.e. tracking fixed counters at base index 32 reduces the
 * amount of boilerplate needed to iterate over PMCs *and* simplifies common
 * enable/disable/reset operations.
 *
 * WARNING!  This helper is only for lookups that are initiated by KVM, it is
 * NOT safe for guest lookups, e.g. it will do the wrong thing if passed a raw
 * ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX
 * for RDPMC, not by adding 32 to the fixed counter index).
 */
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[idx];

	idx -= KVM_FIXED_PMC_BASE_IDX;
	if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[idx];

	return NULL;
}
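
/*
 * Illustrative only: kvm_pmc_idx_to_pmc(pmu, 0) resolves to GP counter 0,
 * while kvm_pmc_idx_to_pmc(pmu, 32) resolves to fixed counter 0, since
 * KVM_FIXED_PMC_BASE_IDX (INTEL_PMC_IDX_FIXED) is 32; indices that map to
 * neither range return NULL.
 */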

#define kvm_for_each_pmc(pmu, pmc, i, bitmap)			\
	for_each_set_bit(i, bitmap, X86_PMC_IDX_MAX)		\
		if (!(pmc = kvm_pmc_idx_to_pmc(pmu, i)))	\
			continue;				\
		else						\

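
/*
 * Typical usage (a sketch; the real call sites live in pmu.c and the vendor
 * implementations):
 *
 *	kvm_for_each_pmc(pmu, pmc, bit, pmu->reprogram_pmi) {
 *		<operate on pmc>
 *	}
 *
 * The dangling "else" splices the caller's loop body into the macro as the
 * else-branch, so bits whose PMC lookup fails are skipped without requiring
 * braces inside the macro.
 */
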
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter + pmc->emulated_counter;

	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}
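
/*
 * The final mask emulates architectural counter wrap, e.g. for a 48-bit wide
 * counter, counter_bitmask is (1ULL << 48) - 1 and the value the guest
 * observes wraps at 2^48 regardless of what the backing perf event has
 * accumulated (assuming the vendor refresh() set the bitmask from the
 * advertised counter width).
 */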

void pmc_write_counter(struct kvm_pmc *pmc, u64 val);

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, u64 data)
{
	return !(pmu->global_ctrl_rsvd & data);
}

/*
 * Returns the general purpose PMC associated with the specified MSR.  Note
 * that it can be used for both PERFCTRn and EVNTSELn; that is why it accepts
 * base as a parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
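
/*
 * For example (assuming the legacy Intel GP-counter MSR layout), vendor code
 * can resolve both flavors of MSR by passing the appropriate base:
 *
 *	pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);	// PERFCTRn
 *	pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);	// EVNTSELn
 *
 * array_index_nospec() clamps the index under speculation so a mispredicted
 * bounds check can't be exploited to read beyond gp_counters.
 */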

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - KVM_FIXED_PMC_BASE_IDX) &
			(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER);

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;
extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;

static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
	int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;

	/*
	 * Hybrid PMUs don't play nice with virtualization without careful
	 * configuration by userspace, and KVM's APIs for reporting supported
	 * vPMU features do not account for hybrid PMUs.  Disable vPMU support
	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
	 */
	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		enable_pmu = false;

	if (enable_pmu) {
		perf_get_x86_pmu_capability(&kvm_pmu_cap);

		/*
		 * Disable the PMU if there are no GP counters.  WARN if perf
		 * reported a non-zero number of GP counters but fewer than
		 * what is architecturally required, i.e. if perf failed to
		 * disable a hardware PMU that doesn't meet the bare minimum.
		 */
		if (!kvm_pmu_cap.num_counters_gp ||
		    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
			enable_pmu = false;
		else if (is_intel && !kvm_pmu_cap.version)
			enable_pmu = false;
	}

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
					  pmu_ops->MAX_NR_GP_COUNTERS);
	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
					     KVM_MAX_NR_FIXED_COUNTERS);

	kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
		perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
	kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
		perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
}
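
/*
 * Net effect (illustrative): on a host whose PMU advertises, say, version 5
 * with 8 GP counters, the guest-visible capability is clamped to version 2
 * semantics, at most pmu_ops->MAX_NR_GP_COUNTERS GP counters and at most
 * KVM_MAX_NR_FIXED_COUNTERS fixed counters.
 */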

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	if (!diff)
		return;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}
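
/*
 * A typical caller (sketch, mirroring how a write to PERF_GLOBAL_CTRL is
 * handled) passes the XOR of the old and new values so that only counters
 * whose enable bit actually flipped get flagged for reprogramming:
 *
 *	diff = pmu->global_ctrl ^ data;
 *	pmu->global_ctrl = data;
 *	reprogram_counters(pmu, diff);
 */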

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 *
 * If the vPMU doesn't have a global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return true;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */