/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

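/*
 * Each general-purpose counter occupies one slot in pmu->gp_counters[];
 * enum index names those slots. Note that msr_to_index() below maps the
 * legacy K7 MSRs and the first four F15h MSR pairs to the same slots.
 */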
enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

/* Duplicated from amd_perfmon_event_map; K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

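/*
 * Pick the base MSR for the requested register type: guests with
 * X86_FEATURE_PERFCTR_CORE use the F15h MSR space (MSR_F15H_PERF_CTL/CTR,
 * starting at 0xc0010200); everything else falls back to the legacy K7 MSRs.
 */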
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

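/*
 * Map a counter or event-select MSR to its gp_counters[] slot. The K7
 * MSRs and the first four F15h MSR pairs share slots, so the same counter
 * can be reached through either MSR family.
 */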
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

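/*
 * Return the pmc tied to @msr, but only if the MSR kind matches @type:
 * event selects for PMU_TYPE_EVNTSEL, counters for PMU_TYPE_COUNTER.
 * Returns NULL for any MSR outside the supported set.
 */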
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

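/*
 * Translate a guest event_select/unit_mask pair into one of the generic
 * PERF_COUNT_HW_* ids via the table above. PERF_COUNT_HW_MAX means "no
 * architectural equivalent"; the caller is expected to fall back to a
 * raw perf event in that case.
 */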
static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
				    u8 event_select,
				    u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select &&
		    amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}

/* Return PERF_COUNT_HW_MAX, as AMD doesn't have fixed events. */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return TRUE unconditionally).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

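/*
 * Map a contiguous counter index back to its pmc. With PERFCTR_CORE the
 * CTL/CTR MSRs are interleaved in pairs, so counter n's CTR MSR sits at
 * base + 2 * n (e.g. pmc_idx 1 -> 0xc0010203 = MSR_F15H_PERF_CTR1).
 */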
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

/* Returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

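	/*
	 * Bits 31:30 of the RDPMC index are flag bits (Intel uses bit 30
	 * to select fixed counters); AMD has neither, so mask them off
	 * before range-checking the raw counter number.
	 */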
	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of the RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

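/* An MSR is valid iff it maps to either a counter or an event select. */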
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	return get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) ||
		get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		*data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		*data = pmc->eventsel;
		return 0;
	}

	return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
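		/*
		 * Adjust the saved counter by the delta between the new
		 * value and the current readout, so a subsequent read
		 * returns @data without reprogramming the perf event.
		 */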
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}

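/*
 * Refresh the PMU model after a guest CPUID update: six counters with
 * PERFCTR_CORE, otherwise the four legacy ones. Counters are 48 bits
 * wide, and reserved_bits guards guest writes to the event selects.
 */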
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xffffffff00200000ull;
	/* Not applicable to AMD; clear them to prevent any fallout. */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->version = 0;
	pmu->global_status = 0;
}

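/*
 * Initialize every slot the vCPU could ever use; refresh() later trims
 * the visible count to what the guest's CPUID actually advertises.
 */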
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

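/*
 * Dispatch table consumed by the generic KVM PMU code in pmu.c; the
 * Intel counterpart lives in pmu_intel.c.
 */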
struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_msr_idx = amd_is_valid_msr_idx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};