v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * KVM PMU support for AMD
  4 *
  5 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
  6 *
  7 * Author:
  8 *   Wei Huang <wei@redhat.com>
  9 *
 10 * Implementation is based on pmu_intel.c file
 11 */
 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 13
 14#include <linux/types.h>
 15#include <linux/kvm_host.h>
 16#include <linux/perf_event.h>
 17#include "x86.h"
 18#include "cpuid.h"
 19#include "lapic.h"
 20#include "pmu.h"
 21#include "svm.h"
 22
 23enum pmu_type {
 24	PMU_TYPE_COUNTER = 0,
 25	PMU_TYPE_EVNTSEL,
 26};
 27
 28static struct kvm_pmc *amd_pmu_get_pmc(struct kvm_pmu *pmu, int pmc_idx)
 29{
 30	unsigned int num_counters = pmu->nr_arch_gp_counters;
 31
 32	if (pmc_idx >= num_counters)
 33		return NULL;
 34
 35	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
 36}
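The bounds check followed by array_index_nospec() is the usual Spectre-v1 hardening pattern: even if the "pmc_idx >= num_counters" branch is mispredicted, the index actually used for the array access is clamped. A minimal userspace sketch of the idea, with a simplified stand-in for the kernel's array_index_nospec() (illustration only, not the kernel's branchless asm implementation):

#include <assert.h>
#include <stddef.h>

/*
 * Simplified stand-in for array_index_nospec(): out-of-range indices collapse
 * to 0 via a mask rather than a branch, so a mispredicted bounds check cannot
 * steer a speculative out-of-bounds load.
 */
static size_t clamp_index_nospec(size_t index, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(index < size);	/* ~0 if in range, else 0 */

	return index & mask;
}

int main(void)
{
	int counters[6] = { 10, 11, 12, 13, 14, 15 };

	assert(clamp_index_nospec(3, 6) == 3);	/* in range: unchanged   */
	assert(clamp_index_nospec(9, 6) == 0);	/* out of range: clamped */
	return counters[clamp_index_nospec(3, 6)] == 13 ? 0 : 1;
}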
 37
 38static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
 39					     enum pmu_type type)
 40{
 41	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
 42	unsigned int idx;
 43
 44	if (!vcpu->kvm->arch.enable_pmu)
 45		return NULL;
 46
 47	switch (msr) {
 48	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
 49		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
 50			return NULL;
 51		/*
 52		 * Each PMU counter has a pair of CTL and CTR MSRs. CTLn
 53		 * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd.
 54		 */
 55		idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
 56		if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
 57			return NULL;
 58		break;
 59	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
 60		if (type != PMU_TYPE_EVNTSEL)
 61			return NULL;
 62		idx = msr - MSR_K7_EVNTSEL0;
 63		break;
 64	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
 65		if (type != PMU_TYPE_COUNTER)
 66			return NULL;
 67		idx = msr - MSR_K7_PERFCTR0;
 68		break;
 69	default:
 70		return NULL;
 71	}
 72
 73	return amd_pmu_get_pmc(pmu, idx);
 74}
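A worked example of the CTL/CTR decode in the MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5 case above. Assuming the MSR values from arch/x86/include/asm/msr-index.h (CTLn = 0xc0010200 + 2n, CTRn = 0xc0010201 + 2n), "(msr - CTL0) / 2" recovers the counter index and the low bit separates event selects (even) from counters (odd):

#include <assert.h>
#include <stdint.h>

#define MSR_F15H_PERF_CTL0	0xc0010200u	/* copied from msr-index.h */
#define MSR_F15H_PERF_CTR0	0xc0010201u

int main(void)
{
	uint32_t ctl2 = MSR_F15H_PERF_CTL0 + 2 * 2;	/* 0xc0010204 */
	uint32_t ctr2 = MSR_F15H_PERF_CTR0 + 2 * 2;	/* 0xc0010205 */

	/* both MSRs of the pair decode to counter index 2 */
	assert((ctl2 - MSR_F15H_PERF_CTL0) / 2 == 2);
	assert((ctr2 - MSR_F15H_PERF_CTL0) / 2 == 2);

	/* the low bit tells event selects (even) apart from counters (odd) */
	assert((ctl2 & 1) == 0);
	assert((ctr2 & 1) == 1);
	return 0;
}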
 75
 76static int amd_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
 77{
 78	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 79
 80	if (idx >= pmu->nr_arch_gp_counters)
 81		return -EINVAL;
 82
 83	return 0;
 84}
 85
 86/* idx is the ECX register of RDPMC instruction */
 87static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 88	unsigned int idx, u64 *mask)
 89{
 90	return amd_pmu_get_pmc(vcpu_to_pmu(vcpu), idx);
 91}
 92
 93static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
 94{
 95	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 96	struct kvm_pmc *pmc;
 97
 98	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
 99	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
100
101	return pmc;
102}
103
104static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
105{
106	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
107
108	switch (msr) {
109	case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
110		return pmu->version > 0;
111	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
112		return guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
113	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
114	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
115	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
116		return pmu->version > 1;
117	default:
118		if (msr > MSR_F15H_PERF_CTR5 &&
119		    msr < MSR_F15H_PERF_CTL0 + 2 * pmu->nr_arch_gp_counters)
120			return pmu->version > 1;
121		break;
122	}
123
124	return amd_msr_idx_to_pmc(vcpu, msr);
125}
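The default case admits the PerfMonV2 counters that sit beyond the legacy CTL5/CTR5 pair: the extra counters keep the same interleaved CTL/CTR layout, so any MSR between MSR_F15H_PERF_CTR5 and MSR_F15H_PERF_CTL0 + 2 * nr_arch_gp_counters is accepted when the guest PMU is version 2. A numeric check, assuming a hypothetical guest with 8 general-purpose counters (MSR values copied from msr-index.h):

#include <assert.h>
#include <stdint.h>

#define MSR_F15H_PERF_CTL0	0xc0010200u
#define MSR_F15H_PERF_CTR5	(0xc0010201u + 2 * 5)	/* 0xc001020b */

int main(void)
{
	unsigned int nr_gp = 8;			/* hypothetical PerfMonV2 guest */
	uint32_t ctl6 = 0xc001020cu;		/* first MSR past the legacy range */
	uint32_t ctr7 = 0xc001020fu;		/* last MSR of counter 7 */
	uint32_t past = 0xc0010210u;		/* first MSR outside the window */

	assert(ctl6 > MSR_F15H_PERF_CTR5 && ctl6 < MSR_F15H_PERF_CTL0 + 2 * nr_gp);
	assert(ctr7 > MSR_F15H_PERF_CTR5 && ctr7 < MSR_F15H_PERF_CTL0 + 2 * nr_gp);
	assert(!(past < MSR_F15H_PERF_CTL0 + 2 * nr_gp));
	return 0;
}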
126
127static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
128{
129	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
130	struct kvm_pmc *pmc;
131	u32 msr = msr_info->index;
132
133	/* MSR_PERFCTRn */
134	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
135	if (pmc) {
136		msr_info->data = pmc_read_counter(pmc);
137		return 0;
138	}
139	/* MSR_EVNTSELn */
140	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
141	if (pmc) {
142		msr_info->data = pmc->eventsel;
143		return 0;
144	}
145
146	return 1;
147}
148
149static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
150{
151	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
152	struct kvm_pmc *pmc;
153	u32 msr = msr_info->index;
154	u64 data = msr_info->data;
155
156	/* MSR_PERFCTRn */
157	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
158	if (pmc) {
159		pmc_write_counter(pmc, data);
160		return 0;
161	}
162	/* MSR_EVNTSELn */
163	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
164	if (pmc) {
165		data &= ~pmu->reserved_bits;
166		if (data != pmc->eventsel) {
167			pmc->eventsel = data;
168			kvm_pmu_request_counter_reprogram(pmc);
169		}
170		return 0;
171	}
172
173	return 1;
174}
175
176static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
177{
178	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
179	union cpuid_0x80000022_ebx ebx;
180
181	pmu->version = 1;
182	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFMON_V2)) {
183		pmu->version = 2;
184		/*
185		 * Note, PERFMON_V2 is also in 0x80000022.0x0, i.e. the guest
186		 * CPUID entry is guaranteed to be non-NULL.
187		 */
188		BUILD_BUG_ON(x86_feature_cpuid(X86_FEATURE_PERFMON_V2).function != 0x80000022 ||
189			     x86_feature_cpuid(X86_FEATURE_PERFMON_V2).index);
190		ebx.full = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0)->ebx;
191		pmu->nr_arch_gp_counters = ebx.split.num_core_pmc;
192	} else if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
193		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
194	} else {
195		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
196	}
197
198	pmu->nr_arch_gp_counters = min_t(unsigned int, pmu->nr_arch_gp_counters,
199					 kvm_pmu_cap.num_counters_gp);
200
201	if (pmu->version > 1) {
202		pmu->global_ctrl_rsvd = ~((1ull << pmu->nr_arch_gp_counters) - 1);
203		pmu->global_status_rsvd = pmu->global_ctrl_rsvd;
204	}
205
206	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
207	pmu->reserved_bits = 0xfffffff000280000ull;
208	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
209	/* not applicable to AMD; but clean them to prevent any fall out */
210	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
211	pmu->nr_arch_fixed_counters = 0;
212	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
213}
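A worked example of the masks amd_pmu_refresh() derives, assuming a hypothetical guest with 6 general-purpose counters: the valid global-control bits are the low 6, global_ctrl_rsvd is their complement, and the GP counter bitmask reflects 48-bit counters:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned int nr_gp = 6;					/* hypothetical counter count */
	uint64_t global_ctrl_rsvd = ~((1ull << nr_gp) - 1);	/* ~0x3f */
	uint64_t counter_mask = ((uint64_t)1 << 48) - 1;	/* 48-bit wraparound */

	assert(global_ctrl_rsvd == ~0x3full);
	assert(counter_mask == 0x0000ffffffffffffull);
	return 0;
}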
214
215static void amd_pmu_init(struct kvm_vcpu *vcpu)
216{
217	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
218	int i;
219
220	BUILD_BUG_ON(KVM_MAX_NR_AMD_GP_COUNTERS > AMD64_NUM_COUNTERS_CORE);
221
222	for (i = 0; i < KVM_MAX_NR_AMD_GP_COUNTERS; i++) {
223		pmu->gp_counters[i].type = KVM_PMC_GP;
224		pmu->gp_counters[i].vcpu = vcpu;
225		pmu->gp_counters[i].idx = i;
226		pmu->gp_counters[i].current_config = 0;
227	}
228}
229
230struct kvm_pmu_ops amd_pmu_ops __initdata = {
231	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
232	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
233	.check_rdpmc_early = amd_check_rdpmc_early,
234	.is_valid_msr = amd_is_valid_msr,
235	.get_msr = amd_pmu_get_msr,
236	.set_msr = amd_pmu_set_msr,
237	.refresh = amd_pmu_refresh,
238	.init = amd_pmu_init,
239	.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
240	.MAX_NR_GP_COUNTERS = KVM_MAX_NR_AMD_GP_COUNTERS,
241	.MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
242};
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * KVM PMU support for AMD
  4 *
  5 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
  6 *
  7 * Author:
  8 *   Wei Huang <wei@redhat.com>
  9 *
 10 * Implementation is based on pmu_intel.c file
 11 */
 12#include <linux/types.h>
 13#include <linux/kvm_host.h>
 14#include <linux/perf_event.h>
 15#include "x86.h"
 16#include "cpuid.h"
 17#include "lapic.h"
 18#include "pmu.h"
 19
 20enum pmu_type {
 21	PMU_TYPE_COUNTER = 0,
 22	PMU_TYPE_EVNTSEL,
 23};
 24
 25enum index {
 26	INDEX_ZERO = 0,
 27	INDEX_ONE,
 28	INDEX_TWO,
 29	INDEX_THREE,
 30	INDEX_FOUR,
 31	INDEX_FIVE,
 32	INDEX_ERROR,
 33};
 34
 35/* duplicated from amd_perfmon_event_map, K7 and above should work. */
 36static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
 37	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
 38	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
 39	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
 40	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
 41	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
 42	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
 43	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
 44	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
 45};
 46
 47static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
 48{
 49	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
 50
 51	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
 52		if (type == PMU_TYPE_COUNTER)
 53			return MSR_F15H_PERF_CTR;
 54		else
 55			return MSR_F15H_PERF_CTL;
 56	} else {
 57		if (type == PMU_TYPE_COUNTER)
 58			return MSR_K7_PERFCTR0;
 59		else
 60			return MSR_K7_EVNTSEL0;
 61	}
 62}
 63
 64static enum index msr_to_index(u32 msr)
 65{
 66	switch (msr) {
 67	case MSR_F15H_PERF_CTL0:
 68	case MSR_F15H_PERF_CTR0:
 69	case MSR_K7_EVNTSEL0:
 70	case MSR_K7_PERFCTR0:
 71		return INDEX_ZERO;
 72	case MSR_F15H_PERF_CTL1:
 73	case MSR_F15H_PERF_CTR1:
 74	case MSR_K7_EVNTSEL1:
 75	case MSR_K7_PERFCTR1:
 76		return INDEX_ONE;
 77	case MSR_F15H_PERF_CTL2:
 78	case MSR_F15H_PERF_CTR2:
 79	case MSR_K7_EVNTSEL2:
 80	case MSR_K7_PERFCTR2:
 81		return INDEX_TWO;
 82	case MSR_F15H_PERF_CTL3:
 83	case MSR_F15H_PERF_CTR3:
 84	case MSR_K7_EVNTSEL3:
 85	case MSR_K7_PERFCTR3:
 86		return INDEX_THREE;
 87	case MSR_F15H_PERF_CTL4:
 88	case MSR_F15H_PERF_CTR4:
 89		return INDEX_FOUR;
 90	case MSR_F15H_PERF_CTL5:
 91	case MSR_F15H_PERF_CTR5:
 92		return INDEX_FIVE;
 93	default:
 94		return INDEX_ERROR;
 95	}
 96}
 97
 98static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
 99					     enum pmu_type type)
100{
101	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
102
103	switch (msr) {
104	case MSR_F15H_PERF_CTL0:
105	case MSR_F15H_PERF_CTL1:
106	case MSR_F15H_PERF_CTL2:
107	case MSR_F15H_PERF_CTL3:
108	case MSR_F15H_PERF_CTL4:
109	case MSR_F15H_PERF_CTL5:
110		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
111			return NULL;
112		fallthrough;
113	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
114		if (type != PMU_TYPE_EVNTSEL)
115			return NULL;
116		break;
117	case MSR_F15H_PERF_CTR0:
118	case MSR_F15H_PERF_CTR1:
119	case MSR_F15H_PERF_CTR2:
120	case MSR_F15H_PERF_CTR3:
121	case MSR_F15H_PERF_CTR4:
122	case MSR_F15H_PERF_CTR5:
123		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
124			return NULL;
125		fallthrough;
126	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
127		if (type != PMU_TYPE_COUNTER)
128			return NULL;
129		break;
130	default:
131		return NULL;
132	}
133
134	return &pmu->gp_counters[msr_to_index(msr)];
135}
136
137static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
138				    u8 event_select,
139				    u8 unit_mask)
140{
141	int i;
142
143	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
144		if (amd_event_mapping[i].eventsel == event_select
145		    && amd_event_mapping[i].unit_mask == unit_mask)
146			break;
147
148	if (i == ARRAY_SIZE(amd_event_mapping))
149		return PERF_COUNT_HW_MAX;
150
151	return amd_event_mapping[i].event_type;
152}
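A worked example of the amd_event_mapping[] lookup: event select 0x76 / unit mask 0x00 (the AMD CPU-cycles event) resolves to PERF_COUNT_HW_CPU_CYCLES, and a pair not in the table falls through to PERF_COUNT_HW_MAX. The table below is a trimmed local copy for illustration; the enum values come from the uapi <linux/perf_event.h> header:

#include <assert.h>
#include <linux/perf_event.h>

struct hw_map { unsigned char eventsel, unit_mask; unsigned int event_type; };

static const struct hw_map map[] = {
	{ 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	{ 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	{ 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
};

static unsigned int find_arch_event(unsigned char sel, unsigned char umask)
{
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].eventsel == sel && map[i].unit_mask == umask)
			return map[i].event_type;
	return PERF_COUNT_HW_MAX;		/* no generic event for this pair */
}

int main(void)
{
	assert(find_arch_event(0x76, 0x00) == PERF_COUNT_HW_CPU_CYCLES);
	assert(find_arch_event(0x12, 0x34) == PERF_COUNT_HW_MAX);
	return 0;
}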
153
154/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
155static unsigned amd_find_fixed_event(int idx)
156{
157	return PERF_COUNT_HW_MAX;
158}
159
160/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
161 * AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE).
162 */
163static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
164{
165	return true;
166}
167
168static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
169{
170	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
171	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
172
173	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
174		/*
175		 * The idx is contiguous. The MSRs are not. The counter MSRs
176		 * are interleaved with the event select MSRs.
177		 */
178		pmc_idx *= 2;
179	}
180
181	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
182}
183
184/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
185static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
186{
187	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
188
189	idx &= ~(3u << 30);
190
191	return (idx >= pmu->nr_arch_gp_counters);
192}
193
194/* idx is the ECX register of RDPMC instruction */
195static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
196	unsigned int idx, u64 *mask)
197{
198	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
199	struct kvm_pmc *counters;
200
201	idx &= ~(3u << 30);
202	if (idx >= pmu->nr_arch_gp_counters)
203		return NULL;
204	counters = pmu->gp_counters;
205
206	return &counters[idx];
207}
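Both RDPMC helpers strip the two high ECX bits with "idx &= ~(3u << 30)"; bit 30 is the fixed-function-counter selector defined for Intel's RDPMC encoding, and AMD has no fixed counters, so only the plain general-purpose index is kept. A small check of that masking:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t idx = (1u << 30) | 2;	/* "fixed counter 2"-style encoding */

	idx &= ~(3u << 30);		/* drop the selector bits */
	assert(idx == 2);		/* treated as GP counter index 2 */
	return 0;
}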
208
209static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
210{
211	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough.  */
212	return false;
213}
214
215static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
216{
217	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
218	struct kvm_pmc *pmc;
219
220	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
221	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
222
223	return pmc;
224}
225
226static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
227{
228	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
229	struct kvm_pmc *pmc;
230	u32 msr = msr_info->index;
231
232	/* MSR_PERFCTRn */
233	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
234	if (pmc) {
235		msr_info->data = pmc_read_counter(pmc);
236		return 0;
237	}
238	/* MSR_EVNTSELn */
239	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
240	if (pmc) {
241		msr_info->data = pmc->eventsel;
242		return 0;
243	}
244
245	return 1;
246}
247
248static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
249{
250	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
251	struct kvm_pmc *pmc;
252	u32 msr = msr_info->index;
253	u64 data = msr_info->data;
254
255	/* MSR_PERFCTRn */
256	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
257	if (pmc) {
258		pmc->counter += data - pmc_read_counter(pmc);
259		return 0;
260	}
261	/* MSR_EVNTSELn */
262	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
263	if (pmc) {
264		if (data == pmc->eventsel)
265			return 0;
266		if (!(data & pmu->reserved_bits)) {
267			reprogram_gp_counter(pmc, data);
268			return 0;
269		}
270	}
271
272	return 1;
273}
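On a counter write, pmc_read_counter() folds the backing perf event's accumulated delta into the stored value, so bumping pmc->counter by "data - pmc_read_counter(pmc)" makes the next read return exactly the written value without stopping the event (newer kernels, as in the v6.13.7 listing above, wrap this in pmc_write_counter()). A sketch of the arithmetic with plain integers standing in for the pmc and its perf event:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t stored = 100;				/* pmc->counter */
	uint64_t perf_delta = 40;			/* accumulated by the perf event */
	uint64_t visible = stored + perf_delta;		/* what pmc_read_counter() returns */
	uint64_t data = 500;				/* value the guest writes */

	stored += data - visible;			/* pmc->counter += data - read */
	assert(stored + perf_delta == data);		/* next read now returns 500 */
	return 0;
}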
274
275static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
276{
277	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
278
279	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
280		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
281	else
282		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
283
284	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
285	pmu->reserved_bits = 0xffffffff00200000ull;
286	pmu->version = 1;
287	/* not applicable to AMD; but clean them to prevent any fall out */
288	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
289	pmu->nr_arch_fixed_counters = 0;
290	pmu->global_status = 0;
291	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
292}
293
294static void amd_pmu_init(struct kvm_vcpu *vcpu)
295{
296	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
297	int i;
298
299	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);
300
301	for (i = 0; i < AMD64_NUM_COUNTERS_CORE ; i++) {
302		pmu->gp_counters[i].type = KVM_PMC_GP;
303		pmu->gp_counters[i].vcpu = vcpu;
304		pmu->gp_counters[i].idx = i;
305		pmu->gp_counters[i].current_config = 0;
306	}
307}
308
309static void amd_pmu_reset(struct kvm_vcpu *vcpu)
310{
311	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
312	int i;
313
314	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
315		struct kvm_pmc *pmc = &pmu->gp_counters[i];
316
317		pmc_stop_counter(pmc);
318		pmc->counter = pmc->eventsel = 0;
319	}
320}
321
322struct kvm_pmu_ops amd_pmu_ops = {
323	.find_arch_event = amd_find_arch_event,
324	.find_fixed_event = amd_find_fixed_event,
325	.pmc_is_enabled = amd_pmc_is_enabled,
326	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
327	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
328	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
329	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
330	.is_valid_msr = amd_is_valid_msr,
331	.get_msr = amd_pmu_get_msr,
332	.set_msr = amd_pmu_set_msr,
333	.refresh = amd_pmu_refresh,
334	.init = amd_pmu_init,
335	.reset = amd_pmu_reset,
336};