v6.13.7 (arch/x86/kvm/pmu.c)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
   4 *
   5 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
   6 *
   7 * Authors:
   8 *   Avi Kivity   <avi@redhat.com>
   9 *   Gleb Natapov <gleb@redhat.com>
  10 *   Wei Huang    <wei@redhat.com>
  11 */
  12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13
  14#include <linux/types.h>
  15#include <linux/kvm_host.h>
  16#include <linux/perf_event.h>
  17#include <linux/bsearch.h>
  18#include <linux/sort.h>
  19#include <asm/perf_event.h>
  20#include <asm/cpu_device_id.h>
  21#include "x86.h"
  22#include "cpuid.h"
  23#include "lapic.h"
  24#include "pmu.h"
  25
  26/* This is enough to filter the vast majority of currently defined events. */
  27#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
  28
  29struct x86_pmu_capability __read_mostly kvm_pmu_cap;
  30EXPORT_SYMBOL_GPL(kvm_pmu_cap);
  31
  32struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
  33EXPORT_SYMBOL_GPL(kvm_pmu_eventsel);
  34
  35/* Precise Distribution of Instructions Retired (PDIR) */
  36static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
  37	X86_MATCH_VFM(INTEL_ICELAKE_D, NULL),
  38	X86_MATCH_VFM(INTEL_ICELAKE_X, NULL),
  39	/* Instruction-Accurate PDIR (PDIR++) */
  40	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
  41	{}
  42};
  43
  44/* Precise Distribution (PDist) */
  45static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
  46	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
  47	{}
  48};
  49
   50/* NOTE:
   51 * - Each perf counter is defined as "struct kvm_pmc";
   52 * - There are two types of perf counters: general purpose (gp) and fixed.
   53 *   gp counters are stored in gp_counters[] and fixed counters are stored
   54 *   in fixed_counters[] respectively. Both of them are part of "struct
   55 *   kvm_pmu";
   56 * - pmu.c understands the difference between gp counters and fixed counters.
   57 *   However, AMD doesn't support fixed counters;
   58 * - There are three types of index to access perf counters (PMC):
   59 *     1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
   60 *        has MSR_K7_PERFCTRn and, for families 15H and later,
   61 *        MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
   62 *        aliased to MSR_K7_PERFCTRn.
   63 *     2. MSR Index (named idx): This is normally used by the RDPMC
   64 *        instruction. For instance, AMD's RDPMC instruction uses 0000_0003h
   65 *        in ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar
   66 *        mechanism, except that it also supports fixed counters. idx can be
   67 *        used as an index into the gp and fixed counters.
   68 *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
   69 *        code. Each pmc, stored in the kvm_pmc.idx field, is unique across
   70 *        all perf counters (both gp and fixed). The mapping between pmc and
   71 *        perf counters is as follows (see the sketch below this comment):
   72 *        * Intel: [0 .. KVM_MAX_NR_INTEL_GP_COUNTERS-1] <=> gp counters
   73 *                 [KVM_FIXED_PMC_BASE_IDX .. KVM_FIXED_PMC_BASE_IDX + 2] <=> fixed
   74 *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
   75 *          and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
   76 */
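/*
 * Illustrative sketch (not part of the kernel file): how a global pmc
 * index could be decoded under the Intel layout above.  The helper name
 * is hypothetical; the real lookup goes through the vendor-specific
 * kvm_pmu_ops machinery.
 */
static inline struct kvm_pmc *example_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	/* gp counters occupy [0, KVM_FIXED_PMC_BASE_IDX) ... */
	if (idx < KVM_FIXED_PMC_BASE_IDX)
		return &pmu->gp_counters[idx];
	/* ... and fixed counters start at KVM_FIXED_PMC_BASE_IDX. */
	return &pmu->fixed_counters[idx - KVM_FIXED_PMC_BASE_IDX];
}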
  77
  78static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;
  79
  80#define KVM_X86_PMU_OP(func)					     \
  81	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,			     \
  82				*(((struct kvm_pmu_ops *)0)->func));
  83#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
  84#include <asm/kvm-x86-pmu-ops.h>
  85
  86void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
  87{
  88	memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));
  89
  90#define __KVM_X86_PMU_OP(func) \
  91	static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
  92#define KVM_X86_PMU_OP(func) \
  93	WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
  94#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
  95#include <asm/kvm-x86-pmu-ops.h>
  96#undef __KVM_X86_PMU_OP
  97}
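/*
 * Illustrative expansion (assumption, not emitted verbatim by the build):
 * for an op named "refresh", the include of kvm-x86-pmu-ops.h above
 * effectively expands to
 *
 *	WARN_ON(!kvm_pmu_ops.refresh);
 *	static_call_update(kvm_x86_pmu_refresh, kvm_pmu_ops.refresh);
 *
 * so every mandatory op is sanity-checked and patched into a static call.
 */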
  98
  99static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
 100{
 101	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 102	bool skip_pmi = false;
 103
 104	if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
 105		if (!in_pmi) {
 106			/*
 107			 * TODO: KVM is currently _choosing_ to not generate records
 108			 * for emulated instructions, avoiding BUFFER_OVF PMI when
 109			 * there are no records. Strictly speaking, it should be done
 110			 * as well in the right context to improve sampling accuracy.
 111			 */
 112			skip_pmi = true;
 113		} else {
 114			/* Indicate PEBS overflow PMI to guest. */
 115			skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
 116						      (unsigned long *)&pmu->global_status);
 117		}
 118	} else {
 119		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
 120	}
 121
 122	if (pmc->intr && !skip_pmi)
 123		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
 124}
 125
 126static void kvm_perf_overflow(struct perf_event *perf_event,
 127			      struct perf_sample_data *data,
 128			      struct pt_regs *regs)
 129{
 130	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
 131
 132	/*
 133	 * Ignore asynchronous overflow events for counters that are scheduled
 134	 * to be reprogrammed, e.g. if a PMI for the previous event races with
 135	 * KVM's handling of a related guest WRMSR.
 136	 */
 137	if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
 138		return;
 139
 140	__kvm_perf_overflow(pmc, true);
 141
 142	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
 143}
 144
 145static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
 146{
 147	/*
  148	 * For some model-specific PEBS counters with special capabilities
  149	 * (PDIR, PDIR++, PDIST), KVM needs to raise the event's precise
  150	 * level to the maximum value (currently 3, backwards compatible)
  151	 * so that the perf subsystem will assign a hardware counter with
  152	 * that capability to the vPMC.
 153	 */
 154	if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) ||
 155	    (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu)))
 156		return 3;
 157
 158	/*
  159	 * A non-zero precision level turns an ordinary guest event into a
  160	 * guest PEBS event and triggers the host PEBS PMI handler, which
  161	 * determines whether a PEBS overflow PMI comes from the host
  162	 * counters or the guest.
 163	 */
 164	return 1;
 165}
 166
 167static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
 168{
 169	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
 170
 171	if (!sample_period)
 172		sample_period = pmc_bitmask(pmc) + 1;
 173	return sample_period;
 174}
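/*
 * Worked example (illustrative): for a 48-bit counter, pmc_bitmask() is
 * 0xFFFFFFFFFFFF.  A guest counter value of 0xFFFFFFFFFFFD yields
 * (-0xFFFFFFFFFFFD) & 0xFFFFFFFFFFFF == 3, i.e. perf arms the event to
 * fire after three more increments, exactly when the guest counter would
 * overflow.  A counter value of 0 yields a full period of 2^48.
 */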
 175
 176static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
 177				 bool exclude_user, bool exclude_kernel,
 178				 bool intr)
 179{
 180	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 181	struct perf_event *event;
 182	struct perf_event_attr attr = {
 183		.type = type,
 184		.size = sizeof(attr),
 185		.pinned = true,
 186		.exclude_idle = true,
 187		.exclude_host = 1,
 188		.exclude_user = exclude_user,
 189		.exclude_kernel = exclude_kernel,
 190		.config = config,
 191	};
 192	bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);
 193
 194	attr.sample_period = get_sample_period(pmc, pmc->counter);
 195
 196	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
 197	    (boot_cpu_has(X86_FEATURE_RTM) || boot_cpu_has(X86_FEATURE_HLE))) {
 198		/*
 199		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
 200		 * period. Just clear the sample period so at least
 201		 * allocating the counter doesn't fail.
 202		 */
 203		attr.sample_period = 0;
 204	}
 205	if (pebs) {
 206		/*
 207		 * For most PEBS hardware events, the difference in the software
 208		 * precision levels of guest and host PEBS events will not affect
 209		 * the accuracy of the PEBS profiling result, because the "event IP"
 210		 * in the PEBS record is calibrated on the guest side.
 211		 */
 212		attr.precise_ip = pmc_get_pebs_precise_level(pmc);
 213	}
 214
 215	event = perf_event_create_kernel_counter(&attr, -1, current,
 216						 kvm_perf_overflow, pmc);
 217	if (IS_ERR(event)) {
 218		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
 219			    PTR_ERR(event), pmc->idx);
 220		return PTR_ERR(event);
 221	}
 222
 223	pmc->perf_event = event;
 224	pmc_to_pmu(pmc)->event_count++;
 225	pmc->is_paused = false;
 226	pmc->intr = intr || pebs;
 227	return 0;
 228}
 229
 230static bool pmc_pause_counter(struct kvm_pmc *pmc)
 231{
 232	u64 counter = pmc->counter;
 233	u64 prev_counter;
 234
 235	/* update counter, reset event value to avoid redundant accumulation */
 236	if (pmc->perf_event && !pmc->is_paused)
 237		counter += perf_event_pause(pmc->perf_event, true);
 238
 239	/*
 240	 * Snapshot the previous counter *after* accumulating state from perf.
 241	 * If overflow already happened, hardware (via perf) is responsible for
 242	 * generating a PMI.  KVM just needs to detect overflow on emulated
 243	 * counter events that haven't yet been processed.
 244	 */
 245	prev_counter = counter & pmc_bitmask(pmc);
 246
 247	counter += pmc->emulated_counter;
 248	pmc->counter = counter & pmc_bitmask(pmc);
 249
 250	pmc->emulated_counter = 0;
 251	pmc->is_paused = true;
 252
 253	return pmc->counter < prev_counter;
 254}
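/*
 * Worked example (illustrative): with an 8-bit bitmask, a perf-accumulated
 * count of 0xFE plus an emulated_counter of 3 wraps to 0x01, so
 * pmc->counter (0x01) < prev_counter (0xFE) and the function reports an
 * overflow that KVM must emulate, since perf never saw the emulated
 * increments.
 */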
 255
 256static bool pmc_resume_counter(struct kvm_pmc *pmc)
 257{
 258	if (!pmc->perf_event)
 259		return false;
 260
 261	/* recalibrate sample period and check if it's accepted by perf core */
 262	if (is_sampling_event(pmc->perf_event) &&
 263	    perf_event_period(pmc->perf_event,
 264			      get_sample_period(pmc, pmc->counter)))
 265		return false;
 266
 267	if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
 268	    (!!pmc->perf_event->attr.precise_ip))
 269		return false;
 270
  271	/* reuse perf_event to serve as pmc_reprogram_counter() does */
 272	perf_event_enable(pmc->perf_event);
 273	pmc->is_paused = false;
 274
 275	return true;
 276}
 277
 278static void pmc_release_perf_event(struct kvm_pmc *pmc)
 279{
 280	if (pmc->perf_event) {
 281		perf_event_release_kernel(pmc->perf_event);
 282		pmc->perf_event = NULL;
 283		pmc->current_config = 0;
 284		pmc_to_pmu(pmc)->event_count--;
 285	}
 286}
 287
 288static void pmc_stop_counter(struct kvm_pmc *pmc)
 289{
 290	if (pmc->perf_event) {
 291		pmc->counter = pmc_read_counter(pmc);
 292		pmc_release_perf_event(pmc);
 293	}
 294}
 295
 296static void pmc_update_sample_period(struct kvm_pmc *pmc)
 297{
 298	if (!pmc->perf_event || pmc->is_paused ||
 299	    !is_sampling_event(pmc->perf_event))
 300		return;
 301
 302	perf_event_period(pmc->perf_event,
 303			  get_sample_period(pmc, pmc->counter));
 304}
 305
 306void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
 307{
  308	/*
  309	 * Drop any unconsumed accumulated counts; the WRMSR is a write, not a
  310	 * read-modify-write.  Adjust the counter value so that its value is
  311	 * relative to the current count, as reading the current count from
  312	 * perf is faster than pausing and reprogramming the event in order to
  313	 * reset it to '0'.  Note, this very sneakily offsets the accumulated
  314	 * emulated count too, by using pmc_read_counter()!
  315	 */
 316	pmc->emulated_counter = 0;
 317	pmc->counter += val - pmc_read_counter(pmc);
 318	pmc->counter &= pmc_bitmask(pmc);
 319	pmc_update_sample_period(pmc);
 320}
 321EXPORT_SYMBOL_GPL(pmc_write_counter);
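/*
 * Worked example (illustrative): if the guest writes 100 while
 * pmc_read_counter() currently reports 250, the code folds in a delta of
 * (100 - 250) so that subsequent reads (live perf count plus the stored
 * delta) report 100, without pausing and restarting the perf event.
 */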
 322
 323static int filter_cmp(const void *pa, const void *pb, u64 mask)
 324{
 325	u64 a = *(u64 *)pa & mask;
 326	u64 b = *(u64 *)pb & mask;
 327
 328	return (a > b) - (a < b);
 329}
 330
 331
 332static int filter_sort_cmp(const void *pa, const void *pb)
 333{
 334	return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT |
 335				   KVM_PMU_MASKED_ENTRY_EXCLUDE));
 336}
 337
 338/*
 339 * For the event filter, searching is done on the 'includes' list and
 340 * 'excludes' list separately rather than on the 'events' list (which
 341 * has both).  As a result the exclude bit can be ignored.
 342 */
 343static int filter_event_cmp(const void *pa, const void *pb)
 344{
 345	return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT));
 346}
 347
 348static int find_filter_index(u64 *events, u64 nevents, u64 key)
 349{
 350	u64 *fe = bsearch(&key, events, nevents, sizeof(events[0]),
 351			  filter_event_cmp);
 352
 353	if (!fe)
 354		return -1;
 355
 356	return fe - events;
 357}
 358
 359static bool is_filter_entry_match(u64 filter_event, u64 umask)
 360{
 361	u64 mask = filter_event >> (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8);
 362	u64 match = filter_event & KVM_PMU_MASKED_ENTRY_UMASK_MATCH;
 363
 364	BUILD_BUG_ON((KVM_PMU_ENCODE_MASKED_ENTRY(0, 0xff, 0, false) >>
 365		     (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8)) !=
 366		     ARCH_PERFMON_EVENTSEL_UMASK);
 367
 368	return (umask & mask) == match;
 369}
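/*
 * Worked example (illustrative): a masked entry with umask_mask 0xF0 and
 * umask_match 0x80 accepts any guest unit mask whose upper nibble is 0x8,
 * e.g. 0x81 and 0x8F match ((umask & 0xF0) == 0x80), while 0x01 does not.
 */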
 370
 371static bool filter_contains_match(u64 *events, u64 nevents, u64 eventsel)
 372{
 373	u64 event_select = eventsel & kvm_pmu_ops.EVENTSEL_EVENT;
 374	u64 umask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;
 375	int i, index;
 376
 377	index = find_filter_index(events, nevents, event_select);
 378	if (index < 0)
 379		return false;
 380
 381	/*
 382	 * Entries are sorted by the event select.  Walk the list in both
 383	 * directions to process all entries with the targeted event select.
 384	 */
 385	for (i = index; i < nevents; i++) {
 386		if (filter_event_cmp(&events[i], &event_select))
 387			break;
 388
 389		if (is_filter_entry_match(events[i], umask))
 390			return true;
 391	}
 392
 393	for (i = index - 1; i >= 0; i--) {
 394		if (filter_event_cmp(&events[i], &event_select))
 395			break;
 396
 397		if (is_filter_entry_match(events[i], umask))
 398			return true;
 399	}
 400
 401	return false;
 402}
 403
 404static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f,
 405				u64 eventsel)
 406{
 407	if (filter_contains_match(f->includes, f->nr_includes, eventsel) &&
 408	    !filter_contains_match(f->excludes, f->nr_excludes, eventsel))
 409		return f->action == KVM_PMU_EVENT_ALLOW;
 410
 411	return f->action == KVM_PMU_EVENT_DENY;
 412}
 413
 414static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
 415				   int idx)
 416{
 417	int fixed_idx = idx - KVM_FIXED_PMC_BASE_IDX;
 418
 419	if (filter->action == KVM_PMU_EVENT_DENY &&
 420	    test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
 421		return false;
 422	if (filter->action == KVM_PMU_EVENT_ALLOW &&
 423	    !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
 424		return false;
 425
 426	return true;
 427}
 428
 429static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 430{
 431	struct kvm_x86_pmu_event_filter *filter;
 432	struct kvm *kvm = pmc->vcpu->kvm;
 433
 434	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
 435	if (!filter)
 436		return true;
 437
 438	if (pmc_is_gp(pmc))
 439		return is_gp_event_allowed(filter, pmc->eventsel);
 440
 441	return is_fixed_event_allowed(filter, pmc->idx);
 442}
 443
 444static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
 445{
 446	return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
 447	       check_pmu_event_filter(pmc);
 448}
 449
 450static int reprogram_counter(struct kvm_pmc *pmc)
 451{
 452	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 453	u64 eventsel = pmc->eventsel;
 454	u64 new_config = eventsel;
 455	bool emulate_overflow;
 456	u8 fixed_ctr_ctrl;
 457
 458	emulate_overflow = pmc_pause_counter(pmc);
 459
 460	if (!pmc_event_is_allowed(pmc))
 461		return 0;
 462
 463	if (emulate_overflow)
 464		__kvm_perf_overflow(pmc, false);
 465
 466	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
 467		printk_once("kvm pmu: pin control bit is ignored\n");
 468
 469	if (pmc_is_fixed(pmc)) {
 470		fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
 471						  pmc->idx - KVM_FIXED_PMC_BASE_IDX);
 472		if (fixed_ctr_ctrl & INTEL_FIXED_0_KERNEL)
 473			eventsel |= ARCH_PERFMON_EVENTSEL_OS;
 474		if (fixed_ctr_ctrl & INTEL_FIXED_0_USER)
 475			eventsel |= ARCH_PERFMON_EVENTSEL_USR;
 476		if (fixed_ctr_ctrl & INTEL_FIXED_0_ENABLE_PMI)
 477			eventsel |= ARCH_PERFMON_EVENTSEL_INT;
 478		new_config = (u64)fixed_ctr_ctrl;
 479	}
 480
 481	if (pmc->current_config == new_config && pmc_resume_counter(pmc))
 482		return 0;
 483
 484	pmc_release_perf_event(pmc);
 485
 486	pmc->current_config = new_config;
 487
 488	return pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
 489				     (eventsel & pmu->raw_event_mask),
 490				     !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
 491				     !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
 492				     eventsel & ARCH_PERFMON_EVENTSEL_INT);
 493}
 494
 495void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 496{
 497	DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
 498	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 499	struct kvm_pmc *pmc;
 500	int bit;
 501
 502	bitmap_copy(bitmap, pmu->reprogram_pmi, X86_PMC_IDX_MAX);
 503
  504	/*
  505	 * The reprogramming bitmap can be written asynchronously by something
  506	 * other than the task that holds vcpu->mutex; take care to clear only
  507	 * the bits that will actually be processed.
  508	 */
 509	BUILD_BUG_ON(sizeof(bitmap) != sizeof(atomic64_t));
 510	atomic64_andnot(*(s64 *)bitmap, &pmu->__reprogram_pmi);
 511
 512	kvm_for_each_pmc(pmu, pmc, bit, bitmap) {
 513		/*
  514		 * If reprogramming fails, e.g. due to contention, leave the
  515		 * reprogram bit set, i.e. opportunistically try again on the
 516		 * next PMU refresh.  Don't make a new request as doing so can
 517		 * stall the guest if reprogramming repeatedly fails.
 518		 */
 519		if (reprogram_counter(pmc))
 520			set_bit(pmc->idx, pmu->reprogram_pmi);
 521	}
 522
 523	/*
 524	 * Release unused perf_events if the corresponding guest MSRs weren't
 525	 * accessed during the last vCPU time slice (need_cleanup is set when
 526	 * the vCPU is scheduled back in).
 527	 */
 528	if (unlikely(pmu->need_cleanup))
 529		kvm_pmu_cleanup(vcpu);
 530}
 531
 532int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
 533{
  534	/*
  535	 * On Intel, VMX interception has priority over RDPMC exceptions that
  536	 * aren't already handled by the emulator, i.e. no additional checks
  537	 * are needed for Intel PMUs.
  538	 *
  539	 * On AMD, _all_ exceptions on RDPMC have priority over SVM intercepts,
  540	 * i.e. an invalid PMC results in a #GP, not #VMEXIT.
  541	 */
 542	if (!kvm_pmu_ops.check_rdpmc_early)
 543		return 0;
 544
 545	return kvm_pmu_call(check_rdpmc_early)(vcpu, idx);
 546}
 547
 548bool is_vmware_backdoor_pmc(u32 pmc_idx)
 549{
 550	switch (pmc_idx) {
 551	case VMWARE_BACKDOOR_PMC_HOST_TSC:
 552	case VMWARE_BACKDOOR_PMC_REAL_TIME:
 553	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
 554		return true;
 555	}
 556	return false;
 557}
 558
 559static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 560{
 561	u64 ctr_val;
 562
 563	switch (idx) {
 564	case VMWARE_BACKDOOR_PMC_HOST_TSC:
 565		ctr_val = rdtsc();
 566		break;
 567	case VMWARE_BACKDOOR_PMC_REAL_TIME:
 568		ctr_val = ktime_get_boottime_ns();
 569		break;
 570	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
 571		ctr_val = ktime_get_boottime_ns() +
 572			vcpu->kvm->arch.kvmclock_offset;
 573		break;
 574	default:
 575		return 1;
 576	}
 577
 578	*data = ctr_val;
 579	return 0;
 580}
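/*
 * Illustrative guest-side usage (assumption based on the VMware backdoor
 * convention, not taken from this file): a guest reads these
 * pseudo-counters with a plain RDPMC, e.g. ECX = VMWARE_BACKDOOR_PMC_HOST_TSC
 * returns the host TSC in EDX:EAX when the backdoor is enabled.
 */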
 581
 582int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 583{
 584	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 585	struct kvm_pmc *pmc;
 586	u64 mask = ~0ull;
 587
 588	if (!pmu->version)
 589		return 1;
 590
 591	if (is_vmware_backdoor_pmc(idx))
 592		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 593
 594	pmc = kvm_pmu_call(rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
 595	if (!pmc)
 596		return 1;
 597
 598	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
 599	    (kvm_x86_call(get_cpl)(vcpu) != 0) &&
 600	    kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
 601		return 1;
 602
 603	*data = pmc_read_counter(pmc) & mask;
 604	return 0;
 605}
 606
 607void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 608{
 609	if (lapic_in_kernel(vcpu)) {
 610		kvm_pmu_call(deliver_pmi)(vcpu);
 611		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 612	}
 613}
 614
 615bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 616{
 617	switch (msr) {
 618	case MSR_CORE_PERF_GLOBAL_STATUS:
 619	case MSR_CORE_PERF_GLOBAL_CTRL:
 620	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 621		return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
 622	default:
 623		break;
 624	}
 625	return kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr) ||
 626	       kvm_pmu_call(is_valid_msr)(vcpu, msr);
 627}
 628
 629static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 630{
 631	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 632	struct kvm_pmc *pmc = kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr);
 633
 634	if (pmc)
 635		__set_bit(pmc->idx, pmu->pmc_in_use);
 636}
 637
 638int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 639{
 640	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 641	u32 msr = msr_info->index;
 642
 643	switch (msr) {
 644	case MSR_CORE_PERF_GLOBAL_STATUS:
 645	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
 646		msr_info->data = pmu->global_status;
 647		break;
 648	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
 649	case MSR_CORE_PERF_GLOBAL_CTRL:
 650		msr_info->data = pmu->global_ctrl;
 651		break;
 652	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
 653	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 654		msr_info->data = 0;
 655		break;
 656	default:
 657		return kvm_pmu_call(get_msr)(vcpu, msr_info);
 658	}
 659
 660	return 0;
 661}
 662
 663int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 664{
 665	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 666	u32 msr = msr_info->index;
 667	u64 data = msr_info->data;
 668	u64 diff;
 669
 670	/*
 671	 * Note, AMD ignores writes to reserved bits and read-only PMU MSRs,
 672	 * whereas Intel generates #GP on attempts to write reserved/RO MSRs.
 673	 */
 674	switch (msr) {
 675	case MSR_CORE_PERF_GLOBAL_STATUS:
 676		if (!msr_info->host_initiated)
 677			return 1; /* RO MSR */
 678		fallthrough;
 679	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
 680		/* Per PPR, Read-only MSR. Writes are ignored. */
 681		if (!msr_info->host_initiated)
 682			break;
 683
 684		if (data & pmu->global_status_rsvd)
 685			return 1;
 686
 687		pmu->global_status = data;
 688		break;
 689	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
 690		data &= ~pmu->global_ctrl_rsvd;
 691		fallthrough;
 692	case MSR_CORE_PERF_GLOBAL_CTRL:
 693		if (!kvm_valid_perf_global_ctrl(pmu, data))
 694			return 1;
 695
 696		if (pmu->global_ctrl != data) {
 697			diff = pmu->global_ctrl ^ data;
 698			pmu->global_ctrl = data;
 699			reprogram_counters(pmu, diff);
 700		}
 701		break;
 702	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 703		/*
 704		 * GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
 705		 * GLOBAL_STATUS, and so the set of reserved bits is the same.
 706		 */
 707		if (data & pmu->global_status_rsvd)
 708			return 1;
 709		fallthrough;
 710	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
 711		if (!msr_info->host_initiated)
 712			pmu->global_status &= ~data;
 713		break;
 714	default:
 715		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
 716		return kvm_pmu_call(set_msr)(vcpu, msr_info);
 717	}
 718
 719	return 0;
 720}
 721
 722static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 723{
 724	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 725	struct kvm_pmc *pmc;
 726	int i;
 727
 728	pmu->need_cleanup = false;
 729
 730	bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);
 731
 732	kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) {
 733		pmc_stop_counter(pmc);
 734		pmc->counter = 0;
 735		pmc->emulated_counter = 0;
 736
 737		if (pmc_is_gp(pmc))
 738			pmc->eventsel = 0;
 739	}
 740
 741	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
 742
 743	kvm_pmu_call(reset)(vcpu);
 744}
 745
 746
 747/*
 748 * Refresh the PMU configuration for the vCPU, e.g. if userspace changes CPUID
 749 * and/or PERF_CAPABILITIES.
 750 */
 751void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 752{
 753	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 754
 755	if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
 756		return;
 757
 758	/*
 759	 * Stop/release all existing counters/events before realizing the new
 760	 * vPMU model.
 761	 */
 762	kvm_pmu_reset(vcpu);
 763
 764	pmu->version = 0;
 765	pmu->nr_arch_gp_counters = 0;
 766	pmu->nr_arch_fixed_counters = 0;
 767	pmu->counter_bitmask[KVM_PMC_GP] = 0;
 768	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
 769	pmu->reserved_bits = 0xffffffff00200000ull;
 770	pmu->raw_event_mask = X86_RAW_EVENT_MASK;
 771	pmu->global_ctrl_rsvd = ~0ull;
 772	pmu->global_status_rsvd = ~0ull;
 773	pmu->fixed_ctr_ctrl_rsvd = ~0ull;
 774	pmu->pebs_enable_rsvd = ~0ull;
 775	pmu->pebs_data_cfg_rsvd = ~0ull;
 776	bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
 777
 778	if (!vcpu->kvm->arch.enable_pmu)
 779		return;
 780
 781	kvm_pmu_call(refresh)(vcpu);
 782
 783	/*
  784	 * At RESET, both Intel and AMD CPUs set all enable bits for general
  785	 * purpose counters in IA32_PERF_GLOBAL_CTRL (so that software written
  786	 * for v1 PMUs doesn't unknowingly leave GP counters disabled in the
  787	 * global controls).  Emulate that behavior when refreshing the PMU so
  788	 * that userspace doesn't need to manually set PERF_GLOBAL_CTRL.
 789	 */
 790	if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters)
 791		pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);
 792}
 793
 794void kvm_pmu_init(struct kvm_vcpu *vcpu)
 795{
 796	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 797
 798	memset(pmu, 0, sizeof(*pmu));
 799	kvm_pmu_call(init)(vcpu);
 800	kvm_pmu_refresh(vcpu);
 801}
 802
 803/* Release perf_events for vPMCs that have been unused for a full time slice.  */
 804void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 805{
 806	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 807	struct kvm_pmc *pmc = NULL;
 808	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
 809	int i;
 810
 811	pmu->need_cleanup = false;
 812
 813	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
 814		      pmu->pmc_in_use, X86_PMC_IDX_MAX);
 815
 816	kvm_for_each_pmc(pmu, pmc, i, bitmask) {
 817		if (pmc->perf_event && !pmc_speculative_in_use(pmc))
 818			pmc_stop_counter(pmc);
 819	}
 820
 821	kvm_pmu_call(cleanup)(vcpu);
 822
 823	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
 824}
 825
 826void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
 827{
 828	kvm_pmu_reset(vcpu);
 829}
 830
 831static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
 832{
 833	pmc->emulated_counter++;
 834	kvm_pmu_request_counter_reprogram(pmc);
 835}
 836
 837static inline bool cpl_is_matched(struct kvm_pmc *pmc)
 838{
 839	bool select_os, select_user;
 840	u64 config;
 841
 842	if (pmc_is_gp(pmc)) {
 843		config = pmc->eventsel;
 844		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
 845		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
 846	} else {
 847		config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
 848					  pmc->idx - KVM_FIXED_PMC_BASE_IDX);
 849		select_os = config & INTEL_FIXED_0_KERNEL;
 850		select_user = config & INTEL_FIXED_0_USER;
 851	}
 852
 853	/*
 854	 * Skip the CPL lookup, which isn't free on Intel, if the result will
 855	 * be the same regardless of the CPL.
 856	 */
 857	if (select_os == select_user)
 858		return select_os;
 859
 860	return (kvm_x86_call(get_cpl)(pmc->vcpu) == 0) ? select_os :
 861							 select_user;
 862}
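/*
 * Illustrative example (not from the kernel file): with OS=1 and USR=0 in
 * the event selector, an instruction emulated while the guest is at CPL0
 * is counted and the same instruction at CPL3 is not; with OS == USR the
 * CPL read is skipped entirely because the result is the same either way.
 */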
 863
 864void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
 865{
 866	DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
 867	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 868	struct kvm_pmc *pmc;
 869	int i;
 870
 871	BUILD_BUG_ON(sizeof(pmu->global_ctrl) * BITS_PER_BYTE != X86_PMC_IDX_MAX);
 872
 873	if (!kvm_pmu_has_perf_global_ctrl(pmu))
 874		bitmap_copy(bitmap, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
 875	else if (!bitmap_and(bitmap, pmu->all_valid_pmc_idx,
 876			     (unsigned long *)&pmu->global_ctrl, X86_PMC_IDX_MAX))
 877		return;
 878
 879	kvm_for_each_pmc(pmu, pmc, i, bitmap) {
 880		/*
  881		 * Ignore checks for edge detect (all events currently emulated
  882		 * by KVM are always rising edges), pin control (unsupported
 883		 * by modern CPUs), and counter mask and its invert flag (KVM
 884		 * doesn't emulate multiple events in a single clock cycle).
 885		 *
 886		 * Note, the uppermost nibble of AMD's mask overlaps Intel's
 887		 * IN_TX (bit 32) and IN_TXCP (bit 33), as well as two reserved
 888		 * bits (bits 35:34).  Checking the "in HLE/RTM transaction"
 889		 * flags is correct as the vCPU can't be in a transaction if
 890		 * KVM is emulating an instruction.  Checking the reserved bits
 891		 * might be wrong if they are defined in the future, but so
 892		 * could ignoring them, so do the simple thing for now.
 893		 */
 894		if (((pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB) ||
 895		    !pmc_event_is_allowed(pmc) || !cpl_is_matched(pmc))
 896			continue;
 897
 898		kvm_pmu_incr_counter(pmc);
 899	}
 900}
 901EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
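/*
 * Illustrative caller sketch (hedged; the real call sites live elsewhere,
 * e.g. in x86.c): when KVM emulates a retired instruction it bumps any
 * matching counters with something like
 *
 *	kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
 *
 * using the emulated event selectors captured in kvm_pmu_eventsel above.
 */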
 902
 903static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
 904{
 905	u64 mask = kvm_pmu_ops.EVENTSEL_EVENT |
 906		   KVM_PMU_MASKED_ENTRY_UMASK_MASK |
 907		   KVM_PMU_MASKED_ENTRY_UMASK_MATCH |
 908		   KVM_PMU_MASKED_ENTRY_EXCLUDE;
 909	int i;
 910
 911	for (i = 0; i < filter->nevents; i++) {
 912		if (filter->events[i] & ~mask)
 913			return false;
 914	}
 915
 916	return true;
 917}
 918
 919static void convert_to_masked_filter(struct kvm_x86_pmu_event_filter *filter)
 920{
 921	int i, j;
 922
 923	for (i = 0, j = 0; i < filter->nevents; i++) {
 924		/*
 925		 * Skip events that are impossible to match against a guest
 926		 * event.  When filtering, only the event select + unit mask
 927		 * of the guest event is used.  To maintain backwards
 928		 * compatibility, impossible filters can't be rejected :-(
 929		 */
 930		if (filter->events[i] & ~(kvm_pmu_ops.EVENTSEL_EVENT |
 931					  ARCH_PERFMON_EVENTSEL_UMASK))
 932			continue;
 933		/*
 934		 * Convert userspace events to a common in-kernel event so
 935		 * only one code path is needed to support both events.  For
 936		 * the in-kernel events use masked events because they are
 937		 * flexible enough to handle both cases.  To convert to masked
  938		 * events all that's needed is to add an "all ones" umask_mask
  939		 * (unmasked filter events don't support EXCLUDE).
 940		 */
 941		filter->events[j++] = filter->events[i] |
 942				      (0xFFULL << KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT);
 943	}
 944
 945	filter->nevents = j;
 946}
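/*
 * Worked example (illustrative): a legacy filter entry for event 0xC0 with
 * unit mask 0x00 becomes a masked entry with umask_mask 0xFF and
 * umask_match 0x00, i.e. it matches a guest event only when the unit mask
 * is exactly 0x00, mirroring the old exact-match behavior.
 */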
 947
 948static int prepare_filter_lists(struct kvm_x86_pmu_event_filter *filter)
 949{
 950	int i;
 951
 952	if (!(filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS))
 953		convert_to_masked_filter(filter);
 954	else if (!is_masked_filter_valid(filter))
 955		return -EINVAL;
 956
 957	/*
 958	 * Sort entries by event select and includes vs. excludes so that all
 959	 * entries for a given event select can be processed efficiently during
 960	 * filtering.  The EXCLUDE flag uses a more significant bit than the
 961	 * event select, and so the sorted list is also effectively split into
 962	 * includes and excludes sub-lists.
 963	 */
 964	sort(&filter->events, filter->nevents, sizeof(filter->events[0]),
 965	     filter_sort_cmp, NULL);
 966
 967	i = filter->nevents;
 968	/* Find the first EXCLUDE event (only supported for masked events). */
 969	if (filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS) {
 970		for (i = 0; i < filter->nevents; i++) {
 971			if (filter->events[i] & KVM_PMU_MASKED_ENTRY_EXCLUDE)
 972				break;
 973		}
 974	}
 975
 976	filter->nr_includes = i;
 977	filter->nr_excludes = filter->nevents - filter->nr_includes;
 978	filter->includes = filter->events;
 979	filter->excludes = filter->events + filter->nr_includes;
 980
 981	return 0;
 982}
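/*
 * Illustrative layout (not from the kernel file): after sorting, a filter
 * with events {A, B, X(EXCLUDE)} is carved up as
 *
 *	events:   [ A, B, X ]
 *	includes: events + 0, nr_includes = 2
 *	excludes: events + 2, nr_excludes = 1
 *
 * so is_gp_event_allowed() can binary-search each sub-list independently.
 */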
 983
 984int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 985{
 986	struct kvm_pmu_event_filter __user *user_filter = argp;
 987	struct kvm_x86_pmu_event_filter *filter;
 988	struct kvm_pmu_event_filter tmp;
 989	struct kvm_vcpu *vcpu;
 990	unsigned long i;
 991	size_t size;
 992	int r;
 993
 994	if (copy_from_user(&tmp, user_filter, sizeof(tmp)))
 995		return -EFAULT;
 996
 997	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
 998	    tmp.action != KVM_PMU_EVENT_DENY)
 999		return -EINVAL;
1000
1001	if (tmp.flags & ~KVM_PMU_EVENT_FLAGS_VALID_MASK)
1002		return -EINVAL;
1003
1004	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
1005		return -E2BIG;
1006
1007	size = struct_size(filter, events, tmp.nevents);
1008	filter = kzalloc(size, GFP_KERNEL_ACCOUNT);
1009	if (!filter)
1010		return -ENOMEM;
1011
1012	filter->action = tmp.action;
1013	filter->nevents = tmp.nevents;
1014	filter->fixed_counter_bitmap = tmp.fixed_counter_bitmap;
1015	filter->flags = tmp.flags;
1016
1017	r = -EFAULT;
1018	if (copy_from_user(filter->events, user_filter->events,
1019			   sizeof(filter->events[0]) * filter->nevents))
1020		goto cleanup;
1021
1022	r = prepare_filter_lists(filter);
1023	if (r)
1024		goto cleanup;
1025
1026	mutex_lock(&kvm->lock);
1027	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
1028				     mutex_is_locked(&kvm->lock));
1029	mutex_unlock(&kvm->lock);
1030	synchronize_srcu_expedited(&kvm->srcu);
1031
1032	BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
1033		     sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));
1034
1035	kvm_for_each_vcpu(i, vcpu, kvm)
1036		atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);
1037
1038	kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);
1039
1040	r = 0;
1041cleanup:
1042	kfree(filter);
1043	return r;
1044}
v6.8 (arch/x86/kvm/pmu.c, older version of the same file)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
  4 *
  5 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
  6 *
  7 * Authors:
  8 *   Avi Kivity   <avi@redhat.com>
  9 *   Gleb Natapov <gleb@redhat.com>
 10 *   Wei Huang    <wei@redhat.com>
 11 */
 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 13
 14#include <linux/types.h>
 15#include <linux/kvm_host.h>
 16#include <linux/perf_event.h>
 17#include <linux/bsearch.h>
 18#include <linux/sort.h>
 19#include <asm/perf_event.h>
 20#include <asm/cpu_device_id.h>
 21#include "x86.h"
 22#include "cpuid.h"
 23#include "lapic.h"
 24#include "pmu.h"
 25
 26/* This is enough to filter the vast majority of currently defined events. */
 27#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
 28
 29struct x86_pmu_capability __read_mostly kvm_pmu_cap;
 30EXPORT_SYMBOL_GPL(kvm_pmu_cap);
 31
 32/* Precise Distribution of Instructions Retired (PDIR) */
 33static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
 34	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
 35	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
 36	/* Instruction-Accurate PDIR (PDIR++) */
 37	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
 38	{}
 39};
 40
 41/* Precise Distribution (PDist) */
 42static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
 43	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
 44	{}
 45};
 46
  47/* NOTE:
  48 * - Each perf counter is defined as "struct kvm_pmc";
  49 * - There are two types of perf counters: general purpose (gp) and fixed.
  50 *   gp counters are stored in gp_counters[] and fixed counters are stored
  51 *   in fixed_counters[] respectively. Both of them are part of "struct
  52 *   kvm_pmu";
  53 * - pmu.c understands the difference between gp counters and fixed counters.
  54 *   However, AMD doesn't support fixed counters;
  55 * - There are three types of index to access perf counters (PMC):
  56 *     1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
  57 *        has MSR_K7_PERFCTRn and, for families 15H and later,
  58 *        MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
  59 *        aliased to MSR_K7_PERFCTRn.
  60 *     2. MSR Index (named idx): This is normally used by the RDPMC
  61 *        instruction. For instance, AMD's RDPMC instruction uses 0000_0003h
  62 *        in ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar
  63 *        mechanism, except that it also supports fixed counters. idx can be
  64 *        used as an index into the gp and fixed counters.
  65 *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
  66 *        code. Each pmc, stored in the kvm_pmc.idx field, is unique across
  67 *        all perf counters (both gp and fixed). The mapping between pmc and
  68 *        perf counters is as follows:
  69 *        * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
  70 *                 [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
  71 *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
  72 *          and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
  73 */
 74
 75static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;
 76
 77#define KVM_X86_PMU_OP(func)					     \
 78	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,			     \
 79				*(((struct kvm_pmu_ops *)0)->func));
 80#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
 81#include <asm/kvm-x86-pmu-ops.h>
 82
 83void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
 84{
 85	memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));
 86
 87#define __KVM_X86_PMU_OP(func) \
 88	static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
 89#define KVM_X86_PMU_OP(func) \
 90	WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
 91#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
 92#include <asm/kvm-x86-pmu-ops.h>
 93#undef __KVM_X86_PMU_OP
 94}
 95
 96static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
 97{
 98	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 99	bool skip_pmi = false;
100
101	if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
102		if (!in_pmi) {
103			/*
104			 * TODO: KVM is currently _choosing_ to not generate records
105			 * for emulated instructions, avoiding BUFFER_OVF PMI when
106			 * there are no records. Strictly speaking, it should be done
107			 * as well in the right context to improve sampling accuracy.
108			 */
109			skip_pmi = true;
110		} else {
111			/* Indicate PEBS overflow PMI to guest. */
112			skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
113						      (unsigned long *)&pmu->global_status);
114		}
115	} else {
116		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
117	}
118
119	if (pmc->intr && !skip_pmi)
120		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
121}
122
123static void kvm_perf_overflow(struct perf_event *perf_event,
124			      struct perf_sample_data *data,
125			      struct pt_regs *regs)
126{
127	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
128
129	/*
130	 * Ignore asynchronous overflow events for counters that are scheduled
131	 * to be reprogrammed, e.g. if a PMI for the previous event races with
132	 * KVM's handling of a related guest WRMSR.
133	 */
134	if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
135		return;
136
137	__kvm_perf_overflow(pmc, true);
138
139	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
140}
141
142static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
143{
144	/*
 145	 * For some model-specific PEBS counters with special capabilities
 146	 * (PDIR, PDIR++, PDIST), KVM needs to raise the event's precise
 147	 * level to the maximum value (currently 3, backwards compatible)
 148	 * so that the perf subsystem will assign a hardware counter with
 149	 * that capability to the vPMC.
150	 */
151	if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) ||
152	    (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu)))
153		return 3;
154
155	/*
 156	 * A non-zero precision level turns an ordinary guest event into a
 157	 * guest PEBS event and triggers the host PEBS PMI handler, which
 158	 * determines whether a PEBS overflow PMI comes from the host
 159	 * counters or the guest.
160	 */
161	return 1;
162}
163
164static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
165{
166	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);
167
168	if (!sample_period)
169		sample_period = pmc_bitmask(pmc) + 1;
170	return sample_period;
171}
172
173static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
174				 bool exclude_user, bool exclude_kernel,
175				 bool intr)
176{
177	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
178	struct perf_event *event;
179	struct perf_event_attr attr = {
180		.type = type,
181		.size = sizeof(attr),
182		.pinned = true,
183		.exclude_idle = true,
184		.exclude_host = 1,
185		.exclude_user = exclude_user,
186		.exclude_kernel = exclude_kernel,
187		.config = config,
188	};
189	bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);
190
191	attr.sample_period = get_sample_period(pmc, pmc->counter);
192
193	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
194	    guest_cpuid_is_intel(pmc->vcpu)) {
195		/*
196		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
197		 * period. Just clear the sample period so at least
198		 * allocating the counter doesn't fail.
199		 */
200		attr.sample_period = 0;
201	}
202	if (pebs) {
203		/*
204		 * For most PEBS hardware events, the difference in the software
205		 * precision levels of guest and host PEBS events will not affect
206		 * the accuracy of the PEBS profiling result, because the "event IP"
207		 * in the PEBS record is calibrated on the guest side.
208		 */
209		attr.precise_ip = pmc_get_pebs_precise_level(pmc);
210	}
211
212	event = perf_event_create_kernel_counter(&attr, -1, current,
213						 kvm_perf_overflow, pmc);
214	if (IS_ERR(event)) {
215		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
216			    PTR_ERR(event), pmc->idx);
217		return PTR_ERR(event);
218	}
219
220	pmc->perf_event = event;
221	pmc_to_pmu(pmc)->event_count++;
222	pmc->is_paused = false;
223	pmc->intr = intr || pebs;
224	return 0;
225}
226
227static bool pmc_pause_counter(struct kvm_pmc *pmc)
228{
229	u64 counter = pmc->counter;
230	u64 prev_counter;
231
232	/* update counter, reset event value to avoid redundant accumulation */
233	if (pmc->perf_event && !pmc->is_paused)
234		counter += perf_event_pause(pmc->perf_event, true);
235
236	/*
237	 * Snapshot the previous counter *after* accumulating state from perf.
238	 * If overflow already happened, hardware (via perf) is responsible for
239	 * generating a PMI.  KVM just needs to detect overflow on emulated
240	 * counter events that haven't yet been processed.
241	 */
242	prev_counter = counter & pmc_bitmask(pmc);
243
244	counter += pmc->emulated_counter;
245	pmc->counter = counter & pmc_bitmask(pmc);
246
247	pmc->emulated_counter = 0;
248	pmc->is_paused = true;
249
250	return pmc->counter < prev_counter;
251}
252
253static bool pmc_resume_counter(struct kvm_pmc *pmc)
254{
255	if (!pmc->perf_event)
256		return false;
257
258	/* recalibrate sample period and check if it's accepted by perf core */
259	if (is_sampling_event(pmc->perf_event) &&
260	    perf_event_period(pmc->perf_event,
261			      get_sample_period(pmc, pmc->counter)))
262		return false;
263
264	if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
265	    (!!pmc->perf_event->attr.precise_ip))
266		return false;
267
 268	/* reuse perf_event to serve as pmc_reprogram_counter() does */
269	perf_event_enable(pmc->perf_event);
270	pmc->is_paused = false;
271
272	return true;
273}
274
275static void pmc_release_perf_event(struct kvm_pmc *pmc)
276{
277	if (pmc->perf_event) {
278		perf_event_release_kernel(pmc->perf_event);
279		pmc->perf_event = NULL;
280		pmc->current_config = 0;
281		pmc_to_pmu(pmc)->event_count--;
282	}
283}
284
285static void pmc_stop_counter(struct kvm_pmc *pmc)
286{
287	if (pmc->perf_event) {
288		pmc->counter = pmc_read_counter(pmc);
289		pmc_release_perf_event(pmc);
290	}
291}
292
293static void pmc_update_sample_period(struct kvm_pmc *pmc)
294{
295	if (!pmc->perf_event || pmc->is_paused ||
296	    !is_sampling_event(pmc->perf_event))
297		return;
298
299	perf_event_period(pmc->perf_event,
300			  get_sample_period(pmc, pmc->counter));
301}
302
303void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
304{
305	/*
 306	 * Drop any unconsumed accumulated counts; the WRMSR is a write, not a
 307	 * read-modify-write.  Adjust the counter value so that its value is
 308	 * relative to the current count, as reading the current count from
 309	 * perf is faster than pausing and reprogramming the event in order to
 310	 * reset it to '0'.  Note, this very sneakily offsets the accumulated
 311	 * emulated count too, by using pmc_read_counter()!
312	 */
313	pmc->emulated_counter = 0;
314	pmc->counter += val - pmc_read_counter(pmc);
315	pmc->counter &= pmc_bitmask(pmc);
316	pmc_update_sample_period(pmc);
317}
318EXPORT_SYMBOL_GPL(pmc_write_counter);
319
320static int filter_cmp(const void *pa, const void *pb, u64 mask)
321{
322	u64 a = *(u64 *)pa & mask;
323	u64 b = *(u64 *)pb & mask;
324
325	return (a > b) - (a < b);
326}
327
328
329static int filter_sort_cmp(const void *pa, const void *pb)
330{
331	return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT |
332				   KVM_PMU_MASKED_ENTRY_EXCLUDE));
333}
334
335/*
336 * For the event filter, searching is done on the 'includes' list and
337 * 'excludes' list separately rather than on the 'events' list (which
338 * has both).  As a result the exclude bit can be ignored.
339 */
340static int filter_event_cmp(const void *pa, const void *pb)
341{
342	return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT));
343}
344
345static int find_filter_index(u64 *events, u64 nevents, u64 key)
346{
347	u64 *fe = bsearch(&key, events, nevents, sizeof(events[0]),
348			  filter_event_cmp);
349
350	if (!fe)
351		return -1;
352
353	return fe - events;
354}
355
356static bool is_filter_entry_match(u64 filter_event, u64 umask)
357{
358	u64 mask = filter_event >> (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8);
359	u64 match = filter_event & KVM_PMU_MASKED_ENTRY_UMASK_MATCH;
360
361	BUILD_BUG_ON((KVM_PMU_ENCODE_MASKED_ENTRY(0, 0xff, 0, false) >>
362		     (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8)) !=
363		     ARCH_PERFMON_EVENTSEL_UMASK);
364
365	return (umask & mask) == match;
366}
367
368static bool filter_contains_match(u64 *events, u64 nevents, u64 eventsel)
369{
370	u64 event_select = eventsel & kvm_pmu_ops.EVENTSEL_EVENT;
371	u64 umask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;
372	int i, index;
373
374	index = find_filter_index(events, nevents, event_select);
375	if (index < 0)
376		return false;
377
378	/*
379	 * Entries are sorted by the event select.  Walk the list in both
380	 * directions to process all entries with the targeted event select.
381	 */
382	for (i = index; i < nevents; i++) {
383		if (filter_event_cmp(&events[i], &event_select))
384			break;
385
386		if (is_filter_entry_match(events[i], umask))
387			return true;
388	}
389
390	for (i = index - 1; i >= 0; i--) {
391		if (filter_event_cmp(&events[i], &event_select))
392			break;
393
394		if (is_filter_entry_match(events[i], umask))
395			return true;
396	}
397
398	return false;
399}
400
401static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f,
402				u64 eventsel)
403{
404	if (filter_contains_match(f->includes, f->nr_includes, eventsel) &&
405	    !filter_contains_match(f->excludes, f->nr_excludes, eventsel))
406		return f->action == KVM_PMU_EVENT_ALLOW;
407
408	return f->action == KVM_PMU_EVENT_DENY;
409}
410
411static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
412				   int idx)
413{
414	int fixed_idx = idx - INTEL_PMC_IDX_FIXED;
415
416	if (filter->action == KVM_PMU_EVENT_DENY &&
417	    test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
418		return false;
419	if (filter->action == KVM_PMU_EVENT_ALLOW &&
420	    !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
421		return false;
422
423	return true;
424}
425
426static bool check_pmu_event_filter(struct kvm_pmc *pmc)
427{
428	struct kvm_x86_pmu_event_filter *filter;
429	struct kvm *kvm = pmc->vcpu->kvm;
430
431	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
432	if (!filter)
433		return true;
434
435	if (pmc_is_gp(pmc))
436		return is_gp_event_allowed(filter, pmc->eventsel);
437
438	return is_fixed_event_allowed(filter, pmc->idx);
439}
440
441static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
442{
443	return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
444	       static_call(kvm_x86_pmu_hw_event_available)(pmc) &&
445	       check_pmu_event_filter(pmc);
446}
447
448static void reprogram_counter(struct kvm_pmc *pmc)
449{
450	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
451	u64 eventsel = pmc->eventsel;
452	u64 new_config = eventsel;
453	bool emulate_overflow;
454	u8 fixed_ctr_ctrl;
455
456	emulate_overflow = pmc_pause_counter(pmc);
457
458	if (!pmc_event_is_allowed(pmc))
459		goto reprogram_complete;
460
461	if (emulate_overflow)
462		__kvm_perf_overflow(pmc, false);
463
464	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
465		printk_once("kvm pmu: pin control bit is ignored\n");
466
467	if (pmc_is_fixed(pmc)) {
468		fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
469						  pmc->idx - INTEL_PMC_IDX_FIXED);
470		if (fixed_ctr_ctrl & 0x1)
471			eventsel |= ARCH_PERFMON_EVENTSEL_OS;
472		if (fixed_ctr_ctrl & 0x2)
473			eventsel |= ARCH_PERFMON_EVENTSEL_USR;
474		if (fixed_ctr_ctrl & 0x8)
475			eventsel |= ARCH_PERFMON_EVENTSEL_INT;
476		new_config = (u64)fixed_ctr_ctrl;
477	}
478
479	if (pmc->current_config == new_config && pmc_resume_counter(pmc))
480		goto reprogram_complete;
481
482	pmc_release_perf_event(pmc);
483
484	pmc->current_config = new_config;
485
486	/*
487	 * If reprogramming fails, e.g. due to contention, leave the counter's
 488	 * reprogram bit set, i.e. opportunistically try again on the next PMU
489	 * refresh.  Don't make a new request as doing so can stall the guest
490	 * if reprogramming repeatedly fails.
491	 */
492	if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
493				  (eventsel & pmu->raw_event_mask),
494				  !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
495				  !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
496				  eventsel & ARCH_PERFMON_EVENTSEL_INT))
497		return;
498
499reprogram_complete:
500	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
501}
502
503void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
504{
505	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
506	int bit;
507
508	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
509		struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);
510
511		if (unlikely(!pmc)) {
512			clear_bit(bit, pmu->reprogram_pmi);
513			continue;
514		}
515
516		reprogram_counter(pmc);
517	}
518
519	/*
520	 * Unused perf_events are only released if the corresponding MSRs
521	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
522	 * triggers KVM_REQ_PMU if cleanup is needed.
523	 */
524	if (unlikely(pmu->need_cleanup))
525		kvm_pmu_cleanup(vcpu);
526}
527
 528/* check if idx is a valid index to access the PMU */
529bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
530{
531	return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
532}
533
534bool is_vmware_backdoor_pmc(u32 pmc_idx)
535{
536	switch (pmc_idx) {
537	case VMWARE_BACKDOOR_PMC_HOST_TSC:
538	case VMWARE_BACKDOOR_PMC_REAL_TIME:
539	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
540		return true;
541	}
542	return false;
543}
544
545static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
546{
547	u64 ctr_val;
548
549	switch (idx) {
550	case VMWARE_BACKDOOR_PMC_HOST_TSC:
551		ctr_val = rdtsc();
552		break;
553	case VMWARE_BACKDOOR_PMC_REAL_TIME:
554		ctr_val = ktime_get_boottime_ns();
555		break;
556	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
557		ctr_val = ktime_get_boottime_ns() +
558			vcpu->kvm->arch.kvmclock_offset;
559		break;
560	default:
561		return 1;
562	}
563
564	*data = ctr_val;
565	return 0;
566}
567
568int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
569{
570	bool fast_mode = idx & (1u << 31);
571	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
572	struct kvm_pmc *pmc;
573	u64 mask = fast_mode ? ~0u : ~0ull;
574
575	if (!pmu->version)
576		return 1;
577
578	if (is_vmware_backdoor_pmc(idx))
579		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
580
581	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
582	if (!pmc)
583		return 1;
584
585	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
586	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
587	    kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
588		return 1;
589
590	*data = pmc_read_counter(pmc) & mask;
591	return 0;
592}
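/*
 * Illustrative note (this older version only; the flag handling is an
 * assumption about the vendor rdpmc_ecx_to_pmc() helpers, which strip the
 * high bits): bit 31 of the RDPMC index requests "fast mode", e.g.
 * ECX = 0x80000000 reads gp counter 0 but truncates the result to 32 bits
 * via the ~0u mask, while ECX = 0 returns the full width-masked value.
 */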
593
594void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
595{
596	if (lapic_in_kernel(vcpu)) {
597		static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
598		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
599	}
600}
601
602bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
603{
604	switch (msr) {
605	case MSR_CORE_PERF_GLOBAL_STATUS:
606	case MSR_CORE_PERF_GLOBAL_CTRL:
607	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
608		return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
609	default:
610		break;
611	}
612	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
613		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
614}
615
616static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
617{
618	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
619	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);
620
621	if (pmc)
622		__set_bit(pmc->idx, pmu->pmc_in_use);
623}
624
625int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
626{
627	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
628	u32 msr = msr_info->index;
629
630	switch (msr) {
631	case MSR_CORE_PERF_GLOBAL_STATUS:
632	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
633		msr_info->data = pmu->global_status;
634		break;
635	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
636	case MSR_CORE_PERF_GLOBAL_CTRL:
637		msr_info->data = pmu->global_ctrl;
638		break;
639	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
640	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
641		msr_info->data = 0;
642		break;
643	default:
644		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
645	}
646
647	return 0;
648}
649
650int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
651{
652	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
653	u32 msr = msr_info->index;
654	u64 data = msr_info->data;
655	u64 diff;
656
657	/*
658	 * Note, AMD ignores writes to reserved bits and read-only PMU MSRs,
659	 * whereas Intel generates #GP on attempts to write reserved/RO MSRs.
660	 */
661	switch (msr) {
662	case MSR_CORE_PERF_GLOBAL_STATUS:
663		if (!msr_info->host_initiated)
664			return 1; /* RO MSR */
665		fallthrough;
666	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
667		/* Per PPR, Read-only MSR. Writes are ignored. */
668		if (!msr_info->host_initiated)
669			break;
670
671		if (data & pmu->global_status_mask)
672			return 1;
673
674		pmu->global_status = data;
675		break;
676	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
677		data &= ~pmu->global_ctrl_mask;
678		fallthrough;
679	case MSR_CORE_PERF_GLOBAL_CTRL:
680		if (!kvm_valid_perf_global_ctrl(pmu, data))
681			return 1;
682
683		if (pmu->global_ctrl != data) {
684			diff = pmu->global_ctrl ^ data;
685			pmu->global_ctrl = data;
686			reprogram_counters(pmu, diff);
687		}
688		break;
689	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
690		/*
691		 * GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
692		 * GLOBAL_STATUS, and so the set of reserved bits is the same.
693		 */
694		if (data & pmu->global_status_mask)
695			return 1;
696		fallthrough;
697	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
698		if (!msr_info->host_initiated)
699			pmu->global_status &= ~data;
700		break;
701	default:
702		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
703		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
704	}
705
706	return 0;
707}
708
709static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
710{
711	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
712	struct kvm_pmc *pmc;
713	int i;
714
715	pmu->need_cleanup = false;
716
717	bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);
718
719	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
720		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
721		if (!pmc)
722			continue;
723
724		pmc_stop_counter(pmc);
725		pmc->counter = 0;
726		pmc->emulated_counter = 0;
727
728		if (pmc_is_gp(pmc))
729			pmc->eventsel = 0;
730	}
731
732	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
733
734	static_call_cond(kvm_x86_pmu_reset)(vcpu);
735}
736
737
738/*
739 * Refresh the PMU configuration for the vCPU, e.g. if userspace changes CPUID
740 * and/or PERF_CAPABILITIES.
741 */
742void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
743{
744	if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
745		return;
746
747	/*
748	 * Stop/release all existing counters/events before realizing the new
749	 * vPMU model.
750	 */
751	kvm_pmu_reset(vcpu);
752
753	bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
754	static_call(kvm_x86_pmu_refresh)(vcpu);
755}
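
/*
 * Illustrative sketch, not part of pmu.c: userspace never calls
 * kvm_pmu_refresh() directly; it is reached via KVM_SET_CPUID2
 * (kvm_vcpu_after_set_cpuid), and the KVM_BUG_ON() above means this must
 * happen before the vCPU first runs.  "vcpu_fd" and the populated "cpuid"
 * are hypothetical.
 */
#include <err.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Establish the vPMU model before any KVM_RUN. */
static void set_cpuid_before_first_run(int vcpu_fd, struct kvm_cpuid2 *cpuid)
{
	if (ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid) < 0)
		err(1, "KVM_SET_CPUID2");
}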
756
757void kvm_pmu_init(struct kvm_vcpu *vcpu)
758{
759	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
760
761	memset(pmu, 0, sizeof(*pmu));
762	static_call(kvm_x86_pmu_init)(vcpu);
763	kvm_pmu_refresh(vcpu);
764}
765
766/* Release perf_events for vPMCs that have been unused for a full time slice. */
767void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
768{
769	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
770	struct kvm_pmc *pmc = NULL;
771	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
772	int i;
773
774	pmu->need_cleanup = false;
775
776	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
777		      pmu->pmc_in_use, X86_PMC_IDX_MAX);
778
779	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
780		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
781
782		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
783			pmc_stop_counter(pmc);
784	}
785
786	static_call_cond(kvm_x86_pmu_cleanup)(vcpu);
787
788	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
789}
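
/*
 * Illustrative sketch, not part of pmu.c: the bitmap_andnot() above is
 * "valid AND NOT in-use", i.e. the vPMCs that kvm_pmu_mark_pmc_in_use()
 * never flagged during the last time slice and whose perf_events can be
 * released.  Modeled here on a single 64-bit word.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t all_valid = 0x0000000700000007ULL; /* 3 GP + 3 fixed (fixed at bit 32) */
	uint64_t in_use    = 0x0000000100000003ULL; /* accessed since last cleanup */
	uint64_t idle      = all_valid & ~in_use;   /* release candidates */

	printf("idle vPMC mask = %#llx\n", (unsigned long long)idle);
	return 0;
}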
790
791void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
792{
793	kvm_pmu_reset(vcpu);
794}
795
796static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
797{
798	pmc->emulated_counter++;
799	kvm_pmu_request_counter_reprogram(pmc);
800}
801
802static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
803	unsigned int perf_hw_id)
804{
805	return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
806		AMD64_RAW_EVENT_MASK_NB);
807}
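
/*
 * Illustrative sketch, not part of pmu.c: the XOR-and-mask above compares
 * only the event select and unit mask fields.  The constant below mirrors
 * what AMD64_RAW_EVENT_MASK_NB is assumed to cover (event select bits 0-7
 * and 32-35, unit mask bits 8-15); edge/invert/cmask bits fall outside it
 * and are ignored.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EVENT_AND_UMASK	0xF0000FFFFULL

static bool same_hw_event(uint64_t a, uint64_t b)
{
	return !((a ^ b) & EVENT_AND_UMASK);
}

int main(void)
{
	uint64_t insns     = 0xC0;		  /* instructions retired */
	uint64_t insns_inv = 0xC0 | (1ULL << 23); /* same event, INV bit set */

	printf("%d\n", same_hw_event(insns, insns_inv)); /* prints 1 */
	return 0;
}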
808
809static inline bool cpl_is_matched(struct kvm_pmc *pmc)
810{
811	bool select_os, select_user;
812	u64 config;
813
814	if (pmc_is_gp(pmc)) {
815		config = pmc->eventsel;
816		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
817		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
818	} else {
819		config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
820					  pmc->idx - INTEL_PMC_IDX_FIXED);
821		select_os = config & 0x1;
822		select_user = config & 0x2;
823	}
824
825	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
826}
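
/*
 * Illustrative sketch, not part of pmu.c: FIXED_CTR_CTRL packs one 4-bit
 * control nibble per fixed counter; bit 0 enables ring-0 counting (OS) and
 * bit 1 enables ring-3 counting (USR), which is what the fixed-counter
 * branch above extracts.  The helper mirrors the kernel's
 * fixed_ctrl_field() macro.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t fixed_ctrl_field(uint64_t ctrl_reg, int idx)
{
	return (ctrl_reg >> (idx * 4)) & 0xf;
}

int main(void)
{
	uint64_t fixed_ctr_ctrl = 0x0B0; /* counter 1: OS + USR + PMI */
	uint64_t cfg = fixed_ctrl_field(fixed_ctr_ctrl, 1);

	printf("OS=%d USR=%d\n", !!(cfg & 0x1), !!(cfg & 0x2));
	return 0;
}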
827
828void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
829{
830	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
831	struct kvm_pmc *pmc;
832	int i;
833
834	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
835		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
836
837		if (!pmc || !pmc_event_is_allowed(pmc))
838			continue;
839
840		/* Ignore checks for edge detect, pin control, invert and CMASK bits */
841		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
842			kvm_pmu_incr_counter(pmc);
843	}
844}
845EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
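
/*
 * Illustrative sketch, not part of pmu.c: a typical caller.  Instructions
 * emulated by KVM are invisible to the hardware PMU, so the emulator feeds
 * the event back in by hand, e.g.:
 *
 *	kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
 *
 * which bumps the emulated count of every allowed, CPL-matching counter
 * programmed to count retired instructions.
 */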
846
847static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
848{
849	u64 mask = kvm_pmu_ops.EVENTSEL_EVENT |
850		   KVM_PMU_MASKED_ENTRY_UMASK_MASK |
851		   KVM_PMU_MASKED_ENTRY_UMASK_MATCH |
852		   KVM_PMU_MASKED_ENTRY_EXCLUDE;
853	int i;
854
855	for (i = 0; i < filter->nevents; i++) {
856		if (filter->events[i] & ~mask)
857			return false;
858	}
859
860	return true;
861}
862
863static void convert_to_masked_filter(struct kvm_x86_pmu_event_filter *filter)
864{
865	int i, j;
866
867	for (i = 0, j = 0; i < filter->nevents; i++) {
868		/*
869		 * Skip events that are impossible to match against a guest
870		 * event.  When filtering, only the event select + unit mask
871		 * of the guest event is used.  To maintain backwards
872		 * compatibility, impossible filters can't be rejected :-(
873		 */
874		if (filter->events[i] & ~(kvm_pmu_ops.EVENTSEL_EVENT |
875					  ARCH_PERFMON_EVENTSEL_UMASK))
876			continue;
877		/*
878		 * Convert userspace events to a common in-kernel event so
879		 * only one code path is needed to support both events.  For
880		 * the in-kernel events, use masked events, as they are flexible
881		 * enough to handle both cases.  To convert to masked events,
882		 * all that's needed is to add an "all ones" umask_mask
883		 * (unmasked filter events don't support EXCLUDE).
884		 */
885		filter->events[j++] = filter->events[i] |
886				      (0xFFULL << KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT);
887	}
888
889	filter->nevents = j;
890}
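
/*
 * Illustrative sketch, not part of pmu.c: the masked-entry layout assumed
 * from the uapi header puts the event select in bits 0-7 and 32-35, the
 * umask match in bits 8-15, EXCLUDE in bit 55 and the umask mask in bits
 * 56-63.  The conversion above turns a legacy "event select + umask" entry
 * into a masked entry whose umask mask is all ones, i.e. exact-match
 * semantics.
 */
#include <stdint.h>
#include <stdio.h>

#define UMASK_MASK_SHIFT 56	/* KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT */

int main(void)
{
	uint64_t legacy = 0x10C0;	/* event 0xC0, umask 0x10 */
	uint64_t masked = legacy | (0xFFULL << UMASK_MASK_SHIFT);

	/* Matches only a guest umask of exactly 0x10. */
	printf("masked entry = %#llx\n", (unsigned long long)masked);
	return 0;
}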
891
892static int prepare_filter_lists(struct kvm_x86_pmu_event_filter *filter)
893{
894	int i;
895
896	if (!(filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS))
897		convert_to_masked_filter(filter);
898	else if (!is_masked_filter_valid(filter))
899		return -EINVAL;
900
901	/*
902	 * Sort entries by event select and includes vs. excludes so that all
903	 * entries for a given event select can be processed efficiently during
904	 * filtering.  The EXCLUDE flag uses a more significant bit than the
905	 * event select, and so the sorted list is also effectively split into
906	 * includes and excludes sub-lists.
907	 */
908	sort(&filter->events, filter->nevents, sizeof(filter->events[0]),
909	     filter_sort_cmp, NULL);
910
911	i = filter->nevents;
912	/* Find the first EXCLUDE event (only supported for masked events). */
913	if (filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS) {
914		for (i = 0; i < filter->nevents; i++) {
915			if (filter->events[i] & KVM_PMU_MASKED_ENTRY_EXCLUDE)
916				break;
917		}
918	}
919
920	filter->nr_includes = i;
921	filter->nr_excludes = filter->nevents - filter->nr_includes;
922	filter->includes = filter->events;
923	filter->excludes = filter->events + filter->nr_includes;
924
925	return 0;
926}
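
/*
 * Illustrative sketch, not part of pmu.c: because EXCLUDE is a more
 * significant sort key than the event select, the sorted array splits into
 * two contiguous runs that share one allocation:
 *
 *	events[]: [ include ... include | exclude ... exclude ]
 *	          ^includes               ^excludes
 *	          |--- nr_includes ----|  |-- nr_excludes ---|
 */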
927
928int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
929{
930	struct kvm_pmu_event_filter __user *user_filter = argp;
931	struct kvm_x86_pmu_event_filter *filter;
932	struct kvm_pmu_event_filter tmp;
933	struct kvm_vcpu *vcpu;
934	unsigned long i;
935	size_t size;
936	int r;
937
938	if (copy_from_user(&tmp, user_filter, sizeof(tmp)))
939		return -EFAULT;
940
941	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
942	    tmp.action != KVM_PMU_EVENT_DENY)
943		return -EINVAL;
944
945	if (tmp.flags & ~KVM_PMU_EVENT_FLAGS_VALID_MASK)
946		return -EINVAL;
947
948	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
949		return -E2BIG;
950
951	size = struct_size(filter, events, tmp.nevents);
952	filter = kzalloc(size, GFP_KERNEL_ACCOUNT);
953	if (!filter)
954		return -ENOMEM;
955
956	filter->action = tmp.action;
957	filter->nevents = tmp.nevents;
958	filter->fixed_counter_bitmap = tmp.fixed_counter_bitmap;
959	filter->flags = tmp.flags;
960
961	r = -EFAULT;
962	if (copy_from_user(filter->events, user_filter->events,
963			   sizeof(filter->events[0]) * filter->nevents))
964		goto cleanup;
965
966	r = prepare_filter_lists(filter);
967	if (r)
968		goto cleanup;
969
970	mutex_lock(&kvm->lock);
971	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
972				     mutex_is_locked(&kvm->lock));
973	mutex_unlock(&kvm->lock);
974	synchronize_srcu_expedited(&kvm->srcu);
975
976	BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
977		     sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));
978
979	kvm_for_each_vcpu(i, vcpu, kvm)
980		atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);
981
982	kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);
983
984	r = 0;
985cleanup:
986	kfree(filter);
987	return r;
988}
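
/*
 * Illustrative sketch, not part of pmu.c: the userspace side of the ioctl
 * handler above.  "vm_fd" is a hypothetical VM fd; the filter allows only
 * event 0xC4 (branch instructions retired) on the GP counters.
 */
#include <err.h>
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static void allow_only_branches(int vm_fd)
{
	struct kvm_pmu_event_filter *filter;

	filter = calloc(1, sizeof(*filter) + sizeof(filter->events[0]));
	if (!filter)
		err(1, "calloc");

	filter->action = KVM_PMU_EVENT_ALLOW;
	filter->nevents = 1;
	filter->events[0] = 0xC4;	/* branch instructions retired */

	if (ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, filter) < 0)
		err(1, "KVM_SET_PMU_EVENT_FILTER");
	free(filter);
}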