v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
  4 *
  5 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
  6 *
  7 * Authors:
  8 *   Avi Kivity   <avi@redhat.com>
  9 *   Gleb Natapov <gleb@redhat.com>
 10 *   Wei Huang    <wei@redhat.com>
 11 */
 12
 13#include <linux/types.h>
 14#include <linux/kvm_host.h>
 15#include <linux/perf_event.h>
 16#include <linux/bsearch.h>
 17#include <linux/sort.h>
 18#include <asm/perf_event.h>
 19#include <asm/cpu_device_id.h>
 20#include "x86.h"
 21#include "cpuid.h"
 22#include "lapic.h"
 23#include "pmu.h"
 24
 25/* This is enough to filter the vast majority of currently defined events. */
 26#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
 27
 28struct x86_pmu_capability __read_mostly kvm_pmu_cap;
 29EXPORT_SYMBOL_GPL(kvm_pmu_cap);
 30
 31static const struct x86_cpu_id vmx_icl_pebs_cpu[] = {
 32	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
 33	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
 34	{}
 35};
 36
 37/* NOTE:
 38 * - Each perf counter is defined as "struct kvm_pmc";
 39 * - There are two types of perf counters: general purpose (gp) and fixed.
 40 *   gp counters are stored in gp_counters[] and fixed counters are stored
 41 *   in fixed_counters[] respectively. Both of them are part of "struct
 42 *   kvm_pmu";
 43 * - pmu.c understands the difference between gp counters and fixed counters.
 44 *   However, AMD doesn't support fixed counters;
 45 * - There are three types of index to access perf counters (PMC):
 46 *     1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 47 *        has MSR_K7_PERFCTRn and, for families 15H and later,
 48 *        MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
 49 *        aliased to MSR_K7_PERFCTRn.
 50 *     2. MSR Index (named idx): This is normally used by the RDPMC instruction.
 51 *        For instance, the AMD RDPMC instruction uses 0000_0003h in ECX to access
 52 *        C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism, except
 53 *        that it also supports fixed counters. idx can be used as an index to
 54 *        gp and fixed counters.
 55 *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
 56 *        code. Each pmc, stored in kvm_pmc.idx field, is unique across
 57 *        all perf counters (both gp and fixed). The mapping relationship
 58 *        between pmc and perf counters is as follows:
 59 *        * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 60 *                 [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 61 *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
 62 *          and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
 63 */
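   /*
    * Illustrative mapping of the three index types above for Intel fixed
    * counter 0 (a sketch based on the SDM and this file, not normative):
    *   - msr: MSR_CORE_PERF_FIXED_CTR0 (0x309)
    *   - idx: RDPMC with ECX = 0x40000000 (bit 30 selects the fixed range,
    *          the low bits select which fixed counter)
    *   - pmc: kvm_pmc.idx = INTEL_PMC_IDX_FIXED + 0 = 32 (cf. the explicit
    *          "pmc->idx == 32" check further down in this file)
    */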
 64
 65static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;
 66
 67#define KVM_X86_PMU_OP(func)					     \
 68	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,			     \
 69				*(((struct kvm_pmu_ops *)0)->func));
 70#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
 71#include <asm/kvm-x86-pmu-ops.h>
 72
 73void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
 74{
 75	memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));
 76
 77#define __KVM_X86_PMU_OP(func) \
 78	static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
 79#define KVM_X86_PMU_OP(func) \
 80	WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
 81#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
 82#include <asm/kvm-x86-pmu-ops.h>
 83#undef __KVM_X86_PMU_OP
 84}
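   /*
    * Rough sketch of what the X-macro machinery above expands to for one op,
    * e.g. "refresh" (simplified; the real list comes from kvm-x86-pmu-ops.h):
    *
    *   DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_refresh,
    *                           *(((struct kvm_pmu_ops *)0)->refresh));
    *   ...
    *   WARN_ON(!kvm_pmu_ops.refresh);
    *   static_call_update(kvm_x86_pmu_refresh, kvm_pmu_ops.refresh);
    *
    * so each vendor callback is reached via a patched static call instead of
    * an indirect function pointer.
    */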
 85
 86static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
 87{
 88	return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc);
 89}
 90
 91static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
 92{
 93	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
 94	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
 95
 96	kvm_pmu_deliver_pmi(vcpu);
 97}
 98
 99static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
100{
101	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
102	bool skip_pmi = false;
103
104	if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
105		if (!in_pmi) {
106			/*
107			 * TODO: KVM currently _chooses_ not to generate records for
108			 * emulated instructions, which avoids a BUFFER_OVF PMI when
109			 * there are no records. Strictly speaking, records should also
110			 * be generated in the right context to improve sampling accuracy.
111			 */
112			skip_pmi = true;
113		} else {
114			/* Indicate PEBS overflow PMI to guest. */
115			skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
116						      (unsigned long *)&pmu->global_status);
117		}
118	} else {
119		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
120	}
121
122	if (!pmc->intr || skip_pmi)
123		return;
124
125	/*
126	 * Inject PMI. If the vcpu was in guest mode during the NMI, the PMI
127	 * can be injected on guest mode re-entry. Otherwise we can't
128	 * be sure that vcpu wasn't executing hlt instruction at the
129	 * time of vmexit and is not going to re-enter guest mode until
130	 * woken up. So we should wake it, but this is impossible from
131	 * NMI context. Do it from irq work instead.
132	 */
133	if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
134		irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
135	else
136		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
137}
138
139static void kvm_perf_overflow(struct perf_event *perf_event,
140			      struct perf_sample_data *data,
141			      struct pt_regs *regs)
142{
143	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
144
145	/*
146	 * Ignore overflow events for counters that are scheduled to be
147	 * reprogrammed, e.g. if a PMI for the previous event races with KVM's
148	 * handling of a related guest WRMSR.
149	 */
150	if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
151		return;
152
153	__kvm_perf_overflow(pmc, true);
154
155	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
156}
157
158static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
159				 bool exclude_user, bool exclude_kernel,
160				 bool intr)
161{
162	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
163	struct perf_event *event;
164	struct perf_event_attr attr = {
165		.type = type,
166		.size = sizeof(attr),
167		.pinned = true,
168		.exclude_idle = true,
169		.exclude_host = 1,
170		.exclude_user = exclude_user,
171		.exclude_kernel = exclude_kernel,
172		.config = config,
173	};
174	bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);
175
176	attr.sample_period = get_sample_period(pmc, pmc->counter);
177
178	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
179	    guest_cpuid_is_intel(pmc->vcpu)) {
180		/*
181		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
182		 * period. Just clear the sample period so at least
183		 * allocating the counter doesn't fail.
184		 */
185		attr.sample_period = 0;
186	}
187	if (pebs) {
188		/*
189		 * The non-zero precision level of the guest event makes an ordinary
190		 * guest event become a guest PEBS event and triggers the host
191		 * PEBS PMI handler to determine whether the PEBS overflow PMI
192		 * comes from the host counters or the guest.
193		 *
194		 * For most PEBS hardware events, the difference in the software
195		 * precision levels of guest and host PEBS events will not affect
196		 * the accuracy of the PEBS profiling result, because the "event IP"
197		 * in the PEBS record is calibrated on the guest side.
198		 *
199		 * On Icelake everything is fine. Other hardware (GLC+, TNT+) that
200		 * could possibly care here is unsupported and needs changes.
201		 */
202		attr.precise_ip = 1;
203		if (x86_match_cpu(vmx_icl_pebs_cpu) && pmc->idx == 32)
204			attr.precise_ip = 3;
205	}
206
207	event = perf_event_create_kernel_counter(&attr, -1, current,
208						 kvm_perf_overflow, pmc);
209	if (IS_ERR(event)) {
210		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
211			    PTR_ERR(event), pmc->idx);
212		return PTR_ERR(event);
213	}
214
215	pmc->perf_event = event;
216	pmc_to_pmu(pmc)->event_count++;
217	pmc->is_paused = false;
218	pmc->intr = intr || pebs;
219	return 0;
220}
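   /*
    * Note on get_sample_period() (defined in pmu.h): the sample period is the
    * distance from the current guest counter value to the overflow point,
    * roughly (-counter) & pmc_bitmask(pmc); the v5.4 copy of this file below
    * open-codes exactly that. The host perf event thus fires when the emulated
    * guest counter would have overflowed.
    */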
221
222static void pmc_pause_counter(struct kvm_pmc *pmc)
223{
224	u64 counter = pmc->counter;
225
226	if (!pmc->perf_event || pmc->is_paused)
227		return;
228
229	/* update counter, reset event value to avoid redundant accumulation */
230	counter += perf_event_pause(pmc->perf_event, true);
231	pmc->counter = counter & pmc_bitmask(pmc);
232	pmc->is_paused = true;
233}
234
235static bool pmc_resume_counter(struct kvm_pmc *pmc)
236{
237	if (!pmc->perf_event)
238		return false;
239
240	/* recalibrate sample period and check if it's accepted by perf core */
241	if (is_sampling_event(pmc->perf_event) &&
242	    perf_event_period(pmc->perf_event,
243			      get_sample_period(pmc, pmc->counter)))
244		return false;
245
246	if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
247	    (!!pmc->perf_event->attr.precise_ip))
248		return false;
249
250	/* reuse perf_event to serve as pmc_reprogram_counter() does */
251	perf_event_enable(pmc->perf_event);
252	pmc->is_paused = false;
253
254	return true;
255}
256
257static int cmp_u64(const void *pa, const void *pb)
258{
259	u64 a = *(u64 *)pa;
260	u64 b = *(u64 *)pb;
261
262	return (a > b) - (a < b);
263}
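   /*
    * cmp_u64() is the shared comparator for sort() and bsearch() over the
    * filter's event list, used further down roughly as:
    *
    *   sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);
    *   bsearch(&key, filter->events, filter->nevents, sizeof(__u64), cmp_u64);
    *
    * Returning (a > b) - (a < b) rather than a plain subtraction avoids
    * truncating a 64-bit difference to the comparator's int return value.
    */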
264
265static bool check_pmu_event_filter(struct kvm_pmc *pmc)
266{
267	struct kvm_pmu_event_filter *filter;
268	struct kvm *kvm = pmc->vcpu->kvm;
269	bool allow_event = true;
270	__u64 key;
271	int idx;
272
273	if (!static_call(kvm_x86_pmu_hw_event_available)(pmc))
274		return false;
275
276	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
277	if (!filter)
278		goto out;
279
280	if (pmc_is_gp(pmc)) {
281		key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB;
282		if (bsearch(&key, filter->events, filter->nevents,
283			    sizeof(__u64), cmp_u64))
284			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
285		else
286			allow_event = filter->action == KVM_PMU_EVENT_DENY;
287	} else {
288		idx = pmc->idx - INTEL_PMC_IDX_FIXED;
289		if (filter->action == KVM_PMU_EVENT_DENY &&
290		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
291			allow_event = false;
292		if (filter->action == KVM_PMU_EVENT_ALLOW &&
293		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
294			allow_event = false;
295	}
296
297out:
298	return allow_event;
299}
300
301static void reprogram_counter(struct kvm_pmc *pmc)
302{
303	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
304	u64 eventsel = pmc->eventsel;
305	u64 new_config = eventsel;
306	u8 fixed_ctr_ctrl;
307
308	pmc_pause_counter(pmc);
309
310	if (!pmc_speculative_in_use(pmc) || !pmc_is_enabled(pmc))
311		goto reprogram_complete;
312
313	if (!check_pmu_event_filter(pmc))
314		goto reprogram_complete;
315
316	if (pmc->counter < pmc->prev_counter)
317		__kvm_perf_overflow(pmc, false);
318
319	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
320		printk_once("kvm pmu: pin control bit is ignored\n");
321
322	if (pmc_is_fixed(pmc)) {
323		fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
324						  pmc->idx - INTEL_PMC_IDX_FIXED);
325		if (fixed_ctr_ctrl & 0x1)
326			eventsel |= ARCH_PERFMON_EVENTSEL_OS;
327		if (fixed_ctr_ctrl & 0x2)
328			eventsel |= ARCH_PERFMON_EVENTSEL_USR;
329		if (fixed_ctr_ctrl & 0x8)
330			eventsel |= ARCH_PERFMON_EVENTSEL_INT;
331		new_config = (u64)fixed_ctr_ctrl;
332	}
333
334	if (pmc->current_config == new_config && pmc_resume_counter(pmc))
335		goto reprogram_complete;
336
337	pmc_release_perf_event(pmc);
338
339	pmc->current_config = new_config;
340
341	/*
342	 * If reprogramming fails, e.g. due to contention, leave the counter's
343	 * reprogram bit set, i.e. opportunistically try again on the next PMU
344	 * refresh.  Don't make a new request as doing so can stall the guest
345	 * if reprogramming repeatedly fails.
346	 */
347	if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
348				  (eventsel & pmu->raw_event_mask),
349				  !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
350				  !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
351				  eventsel & ARCH_PERFMON_EVENTSEL_INT))
352		return;
353
354reprogram_complete:
355	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
356	pmc->prev_counter = 0;
357}
358
359void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
360{
361	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
362	int bit;
363
364	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
365		struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);
366
367		if (unlikely(!pmc)) {
368			clear_bit(bit, pmu->reprogram_pmi);
369			continue;
370		}
371
372		reprogram_counter(pmc);
373	}
374
375	/*
376	 * Unused perf_events are only released if the corresponding MSRs
377	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
378	 * triggers KVM_REQ_PMU if cleanup is needed.
379	 */
380	if (unlikely(pmu->need_cleanup))
381		kvm_pmu_cleanup(vcpu);
382}
383
384/* Check whether idx is a valid index for accessing the PMU. */
385bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
386{
387	return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
388}
389
390bool is_vmware_backdoor_pmc(u32 pmc_idx)
391{
392	switch (pmc_idx) {
393	case VMWARE_BACKDOOR_PMC_HOST_TSC:
394	case VMWARE_BACKDOOR_PMC_REAL_TIME:
395	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
396		return true;
397	}
398	return false;
399}
400
401static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
402{
403	u64 ctr_val;
404
405	switch (idx) {
406	case VMWARE_BACKDOOR_PMC_HOST_TSC:
407		ctr_val = rdtsc();
408		break;
409	case VMWARE_BACKDOOR_PMC_REAL_TIME:
410		ctr_val = ktime_get_boottime_ns();
411		break;
412	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
413		ctr_val = ktime_get_boottime_ns() +
414			vcpu->kvm->arch.kvmclock_offset;
415		break;
416	default:
417		return 1;
418	}
419
420	*data = ctr_val;
421	return 0;
422}
423
424int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
425{
426	bool fast_mode = idx & (1u << 31);
427	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
428	struct kvm_pmc *pmc;
429	u64 mask = fast_mode ? ~0u : ~0ull;
430
431	if (!pmu->version)
432		return 1;
433
434	if (is_vmware_backdoor_pmc(idx))
435		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
436
437	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
438	if (!pmc)
439		return 1;
440
441	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
442	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
443	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
444		return 1;
445
446	*data = pmc_read_counter(pmc) & mask;
447	return 0;
448}
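   /*
    * Bit 31 of the RDPMC index is the legacy "fast read" hint: when set, only
    * the low 32 bits of the counter are returned (mask is ~0u rather than
    * ~0ull above). The remaining index bits are decoded by the vendor
    * rdpmc_ecx_to_pmc() callback.
    */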
449
450void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
451{
452	if (lapic_in_kernel(vcpu)) {
453		static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
454		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
455	}
456}
457
458bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
459{
460	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
461		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
462}
463
464static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
465{
466	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
467	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);
468
469	if (pmc)
470		__set_bit(pmc->idx, pmu->pmc_in_use);
471}
472
473int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
474{
475	return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
476}
477
478int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
479{
480	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
481	return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
482}
483
484/* Refresh PMU settings. This function is generally called when the underlying
485 * settings change (such as a change of the PMU CPUID by the guest), which
486 * should rarely happen.
487 */
488void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
489{
490	static_call(kvm_x86_pmu_refresh)(vcpu);
491}
492
493void kvm_pmu_reset(struct kvm_vcpu *vcpu)
494{
495	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
496
497	irq_work_sync(&pmu->irq_work);
498	static_call(kvm_x86_pmu_reset)(vcpu);
499}
500
501void kvm_pmu_init(struct kvm_vcpu *vcpu)
502{
503	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
504
505	memset(pmu, 0, sizeof(*pmu));
506	static_call(kvm_x86_pmu_init)(vcpu);
507	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
508	pmu->event_count = 0;
509	pmu->need_cleanup = false;
510	kvm_pmu_refresh(vcpu);
511}
512
513/* Release perf_events for vPMCs that have been unused for a full time slice.  */
514void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
515{
516	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
517	struct kvm_pmc *pmc = NULL;
518	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
519	int i;
520
521	pmu->need_cleanup = false;
522
523	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
524		      pmu->pmc_in_use, X86_PMC_IDX_MAX);
525
526	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
527		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
528
529		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
530			pmc_stop_counter(pmc);
531	}
532
533	static_call_cond(kvm_x86_pmu_cleanup)(vcpu);
534
535	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
536}
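   /*
    * Cleanup sketch: pmc_in_use is set on guest PMU MSR writes (see
    * kvm_pmu_mark_pmc_in_use() above) and zeroed at the end of each cleanup
    * pass, so the bitmap_andnot() above yields the valid counters the guest
    * has not touched since the last pass; their perf_events are released to
    * free up host PMU resources.
    */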
537
538void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
539{
540	kvm_pmu_reset(vcpu);
541}
542
543static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
544{
545	pmc->prev_counter = pmc->counter;
546	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
547	kvm_pmu_request_counter_reprogam(pmc);
548}
549
550static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
551	unsigned int perf_hw_id)
552{
553	return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
554		AMD64_RAW_EVENT_MASK_NB);
555}
556
557static inline bool cpl_is_matched(struct kvm_pmc *pmc)
558{
559	bool select_os, select_user;
560	u64 config;
561
562	if (pmc_is_gp(pmc)) {
563		config = pmc->eventsel;
564		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
565		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
566	} else {
567		config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
568					  pmc->idx - INTEL_PMC_IDX_FIXED);
569		select_os = config & 0x1;
570		select_user = config & 0x2;
571	}
572
573	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
574}
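   /*
    * The fixed_ctr_ctrl field decoded above follows the architectural
    * IA32_FIXED_CTR_CTRL layout: bit 0 enables counting at CPL 0 (OS), bit 1
    * at CPL > 0 (user) and bit 3 enables the PMI, matching the 0x1/0x2/0x8
    * checks in reprogram_counter() above.
    */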
575
576void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
577{
578	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
579	struct kvm_pmc *pmc;
580	int i;
581
582	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
583		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
584
585		if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
586			continue;
587
588		/* Ignore checks for edge detect, pin control, invert and CMASK bits */
589		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
590			kvm_pmu_incr_counter(pmc);
591	}
592}
593EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
594
595int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
596{
597	struct kvm_pmu_event_filter tmp, *filter;
598	struct kvm_vcpu *vcpu;
599	unsigned long i;
600	size_t size;
601	int r;
602
603	if (copy_from_user(&tmp, argp, sizeof(tmp)))
604		return -EFAULT;
605
606	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
607	    tmp.action != KVM_PMU_EVENT_DENY)
608		return -EINVAL;
609
610	if (tmp.flags != 0)
611		return -EINVAL;
612
613	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
614		return -E2BIG;
615
616	size = struct_size(filter, events, tmp.nevents);
617	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
618	if (!filter)
619		return -ENOMEM;
620
621	r = -EFAULT;
622	if (copy_from_user(filter, argp, size))
623		goto cleanup;
624
625	/* Ensure nevents can't be changed between the user copies. */
626	*filter = tmp;
627
628	/*
629	 * Sort the in-kernel list so that we can search it with bsearch.
630	 */
631	sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);
632
633	mutex_lock(&kvm->lock);
634	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
635				     mutex_is_locked(&kvm->lock));
636	synchronize_srcu_expedited(&kvm->srcu);
637
638	BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
639		     sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));
640
641	kvm_for_each_vcpu(i, vcpu, kvm)
642		atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);
643
644	kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);
645
646	mutex_unlock(&kvm->lock);
647
648	r = 0;
649cleanup:
650	kfree(filter);
651	return r;
652}
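   /*
    * Hedged userspace sketch (not part of this file): the handler above backs
    * the KVM_SET_PMU_EVENT_FILTER VM ioctl. A caller might build a filter
    * roughly like this; the fd name and the chosen event encoding are
    * assumptions for illustration only:
    *
    *   struct kvm_pmu_event_filter *f =
    *           calloc(1, sizeof(*f) + 1 * sizeof(__u64));
    *   f->action = KVM_PMU_EVENT_ALLOW;
    *   f->nevents = 1;
    *   f->events[0] = 0x00c0;   // event select 0xC0, umask 0 (INST_RETIRED)
    *   ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
    */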
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
  4 *
  5 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
  6 *
  7 * Authors:
  8 *   Avi Kivity   <avi@redhat.com>
  9 *   Gleb Natapov <gleb@redhat.com>
 10 *   Wei Huang    <wei@redhat.com>
 11 */
 12
 13#include <linux/types.h>
 14#include <linux/kvm_host.h>
 15#include <linux/perf_event.h>
 16#include <asm/perf_event.h>
 17#include "x86.h"
 18#include "cpuid.h"
 19#include "lapic.h"
 20#include "pmu.h"
 21
 22/* This is enough to filter the vast majority of currently defined events. */
 23#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
 24
 25/* NOTE:
 26 * - Each perf counter is defined as "struct kvm_pmc";
 27 * - There are two types of perf counters: general purpose (gp) and fixed.
 28 *   gp counters are stored in gp_counters[] and fixed counters are stored
 29 *   in fixed_counters[] respectively. Both of them are part of "struct
 30 *   kvm_pmu";
 31 * - pmu.c understands the difference between gp counters and fixed counters.
 32 *   However, AMD doesn't support fixed counters;
 33 * - There are three types of index to access perf counters (PMC):
 34 *     1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 35 *        has MSR_K7_PERFCTRn.
 36 *     2. MSR Index (named idx): This is normally used by the RDPMC instruction.
 37 *        For instance, the AMD RDPMC instruction uses 0000_0003h in ECX to access
 38 *        C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism, except
 39 *        that it also supports fixed counters. idx can be used as an index to
 40 *        gp and fixed counters.
 41 *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
 42 *        code. Each pmc, stored in kvm_pmc.idx field, is unique across
 43 *        all perf counters (both gp and fixed). The mapping relationship
 44 *        between pmc and perf counters is as follows:
 45 *        * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 46 *                 [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 47 *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 48 */
 49
 50static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
 51{
 52	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
 53	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
 54
 55	kvm_pmu_deliver_pmi(vcpu);
 56}
 57
 58static void kvm_perf_overflow(struct perf_event *perf_event,
 59			      struct perf_sample_data *data,
 60			      struct pt_regs *regs)
 61{
 62	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 63	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 64
 65	if (!test_and_set_bit(pmc->idx,
 66			      (unsigned long *)&pmu->reprogram_pmi)) {
 67		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 68		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
 69	}
 70}
 71
 72static void kvm_perf_overflow_intr(struct perf_event *perf_event,
 73				   struct perf_sample_data *data,
 74				   struct pt_regs *regs)
 75{
 76	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 77	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 78
 79	if (!test_and_set_bit(pmc->idx,
 80			      (unsigned long *)&pmu->reprogram_pmi)) {
 81		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 82		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
 83
 84		/*
 85		 * Inject PMI. If the vcpu was in guest mode during the NMI, the PMI
 86		 * can be injected on guest mode re-entry. Otherwise we can't
 87		 * be sure that vcpu wasn't executing hlt instruction at the
 88		 * time of vmexit and is not going to re-enter guest mode until
 89		 * woken up. So we should wake it, but this is impossible from
 90		 * NMI context. Do it from irq work instead.
 91		 */
 92		if (!kvm_is_in_guest())
 93			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
 94		else
 95			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
 96	}
 97}
 98
 99static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
100				  unsigned config, bool exclude_user,
101				  bool exclude_kernel, bool intr,
102				  bool in_tx, bool in_tx_cp)
103{
104	struct perf_event *event;
105	struct perf_event_attr attr = {
106		.type = type,
107		.size = sizeof(attr),
108		.pinned = true,
109		.exclude_idle = true,
110		.exclude_host = 1,
111		.exclude_user = exclude_user,
112		.exclude_kernel = exclude_kernel,
113		.config = config,
114	};
115
116	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
117
118	if (in_tx)
119		attr.config |= HSW_IN_TX;
120	if (in_tx_cp) {
121		/*
122		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
123		 * period. Just clear the sample period so at least
124		 * allocating the counter doesn't fail.
125		 */
126		attr.sample_period = 0;
127		attr.config |= HSW_IN_TX_CHECKPOINTED;
128	}
129
130	event = perf_event_create_kernel_counter(&attr, -1, current,
131						 intr ? kvm_perf_overflow_intr :
132						 kvm_perf_overflow, pmc);
133	if (IS_ERR(event)) {
134		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
135			    PTR_ERR(event), pmc->idx);
136		return;
137	}
138
139	pmc->perf_event = event;
140	clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
141}
142
143void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
144{
145	unsigned config, type = PERF_TYPE_RAW;
146	u8 event_select, unit_mask;
147	struct kvm *kvm = pmc->vcpu->kvm;
148	struct kvm_pmu_event_filter *filter;
149	int i;
150	bool allow_event = true;
151
152	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
153		printk_once("kvm pmu: pin control bit is ignored\n");
154
155	pmc->eventsel = eventsel;
156
157	pmc_stop_counter(pmc);
158
159	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
160		return;
161
162	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
163	if (filter) {
164		for (i = 0; i < filter->nevents; i++)
165			if (filter->events[i] ==
166			    (eventsel & AMD64_RAW_EVENT_MASK_NB))
167				break;
168		if (filter->action == KVM_PMU_EVENT_ALLOW &&
169		    i == filter->nevents)
170			allow_event = false;
171		if (filter->action == KVM_PMU_EVENT_DENY &&
172		    i < filter->nevents)
173			allow_event = false;
174	}
175	if (!allow_event)
176		return;
177
178	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
179	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
180
181	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
182			  ARCH_PERFMON_EVENTSEL_INV |
183			  ARCH_PERFMON_EVENTSEL_CMASK |
184			  HSW_IN_TX |
185			  HSW_IN_TX_CHECKPOINTED))) {
186		config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
187						      event_select,
188						      unit_mask);
189		if (config != PERF_COUNT_HW_MAX)
190			type = PERF_TYPE_HARDWARE;
191	}
192
193	if (type == PERF_TYPE_RAW)
194		config = eventsel & X86_RAW_EVENT_MASK;
195
196	pmc_reprogram_counter(pmc, type, config,
197			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
198			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
199			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
200			      (eventsel & HSW_IN_TX),
201			      (eventsel & HSW_IN_TX_CHECKPOINTED));
202}
203EXPORT_SYMBOL_GPL(reprogram_gp_counter);
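   /*
    * Decoding sketch for the gp eventsel handled above: bits 7:0 are the
    * event select and bits 15:8 the unit mask. With no edge/invert/cmask or
    * HSW TX modifiers set, the pair is translated to a generic
    * PERF_TYPE_HARDWARE id via find_arch_event(); otherwise the raw encoding
    * (eventsel & X86_RAW_EVENT_MASK) is passed to perf as PERF_TYPE_RAW.
    */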
204
205void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
206{
207	unsigned en_field = ctrl & 0x3;
208	bool pmi = ctrl & 0x8;
209	struct kvm_pmu_event_filter *filter;
210	struct kvm *kvm = pmc->vcpu->kvm;
211
212	pmc_stop_counter(pmc);
213
214	if (!en_field || !pmc_is_enabled(pmc))
215		return;
216
217	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
218	if (filter) {
219		if (filter->action == KVM_PMU_EVENT_DENY &&
220		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
221			return;
222		if (filter->action == KVM_PMU_EVENT_ALLOW &&
223		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
224			return;
225	}
226
227	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
228			      kvm_x86_ops->pmu_ops->find_fixed_event(idx),
229			      !(en_field & 0x2), /* exclude user */
230			      !(en_field & 0x1), /* exclude kernel */
231			      pmi, false, false);
232}
233EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
234
235void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
236{
237	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
238
239	if (!pmc)
240		return;
241
242	if (pmc_is_gp(pmc))
243		reprogram_gp_counter(pmc, pmc->eventsel);
244	else {
245		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
246		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);
247
248		reprogram_fixed_counter(pmc, ctrl, idx);
249	}
250}
251EXPORT_SYMBOL_GPL(reprogram_counter);
252
253void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
254{
255	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
256	u64 bitmask;
257	int bit;
258
259	bitmask = pmu->reprogram_pmi;
260
261	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
262		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);
263
264		if (unlikely(!pmc || !pmc->perf_event)) {
265			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
266			continue;
267		}
268
269		reprogram_counter(pmu, bit);
270	}
271}
272
273/* check if idx is a valid index to access PMU */
274int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
275{
276	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
277}
278
279bool is_vmware_backdoor_pmc(u32 pmc_idx)
280{
281	switch (pmc_idx) {
282	case VMWARE_BACKDOOR_PMC_HOST_TSC:
283	case VMWARE_BACKDOOR_PMC_REAL_TIME:
284	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
285		return true;
286	}
287	return false;
288}
289
290static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
291{
292	u64 ctr_val;
293
294	switch (idx) {
295	case VMWARE_BACKDOOR_PMC_HOST_TSC:
296		ctr_val = rdtsc();
297		break;
298	case VMWARE_BACKDOOR_PMC_REAL_TIME:
299		ctr_val = ktime_get_boottime_ns();
300		break;
301	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
302		ctr_val = ktime_get_boottime_ns() +
303			vcpu->kvm->arch.kvmclock_offset;
304		break;
305	default:
306		return 1;
307	}
308
309	*data = ctr_val;
310	return 0;
311}
312
313int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
314{
315	bool fast_mode = idx & (1u << 31);
316	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
317	struct kvm_pmc *pmc;
318	u64 mask = fast_mode ? ~0u : ~0ull;
319
320	if (!pmu->version)
321		return 1;
322
323	if (is_vmware_backdoor_pmc(idx))
324		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
325
326	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
327	if (!pmc)
328		return 1;
329
330	*data = pmc_read_counter(pmc) & mask;
331	return 0;
332}
333
334void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
335{
336	if (lapic_in_kernel(vcpu))
337		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
338}
339
340bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
341{
342	return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
343}
344
345int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
346{
347	return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
348}
349
350int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
351{
352	return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
353}
354
355/* Refresh PMU settings. This function is generally called when the underlying
356 * settings change (such as a change of the PMU CPUID by the guest), which
357 * should rarely happen.
358 */
359void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
360{
361	kvm_x86_ops->pmu_ops->refresh(vcpu);
362}
363
364void kvm_pmu_reset(struct kvm_vcpu *vcpu)
365{
366	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
367
368	irq_work_sync(&pmu->irq_work);
369	kvm_x86_ops->pmu_ops->reset(vcpu);
370}
371
372void kvm_pmu_init(struct kvm_vcpu *vcpu)
373{
374	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
375
376	memset(pmu, 0, sizeof(*pmu));
377	kvm_x86_ops->pmu_ops->init(vcpu);
378	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
379	kvm_pmu_refresh(vcpu);
380}
381
382void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
383{
384	kvm_pmu_reset(vcpu);
385}
386
387int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
388{
389	struct kvm_pmu_event_filter tmp, *filter;
390	size_t size;
391	int r;
392
393	if (copy_from_user(&tmp, argp, sizeof(tmp)))
394		return -EFAULT;
395
396	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
397	    tmp.action != KVM_PMU_EVENT_DENY)
398		return -EINVAL;
399
400	if (tmp.flags != 0)
401		return -EINVAL;
402
403	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
404		return -E2BIG;
405
406	size = struct_size(filter, events, tmp.nevents);
407	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
408	if (!filter)
409		return -ENOMEM;
410
411	r = -EFAULT;
412	if (copy_from_user(filter, argp, size))
413		goto cleanup;
414
415	/* Ensure nevents can't be changed between the user copies. */
416	*filter = tmp;
417
418	mutex_lock(&kvm->lock);
419	rcu_swap_protected(kvm->arch.pmu_event_filter, filter,
420			   mutex_is_locked(&kvm->lock));
421	mutex_unlock(&kvm->lock);
422
423	synchronize_srcu_expedited(&kvm->srcu);
424	r = 0;
425cleanup:
426	kfree(filter);
427	return r;
428}