/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/irq.h>
#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_pm.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
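/*
 * At FREQUENCY == 200 this works out to NSEC_PER_SEC / 200 == 5,000,000 ns,
 * i.e. a 5 ms sampling period; the max_t() clamp only guards against a
 * FREQUENCY set so high that the period would drop below 10 us.
 */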

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask;

static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}
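
/*
 * A sketch of the engine event encoding which the helpers above decode,
 * going by the definitions in include/uapi/drm/i915_drm.h (field layout is
 * quoted from that header and may differ between kernel versions):
 *
 *	config = __I915_PMU_ENGINE(class, instance, sample)
 *	       = (class << I915_PMU_CLASS_SHIFT) |
 *		 (instance << I915_PMU_SAMPLE_BITS) |
 *		 sample
 *
 * i.e. the sample type lives in the low I915_PMU_SAMPLE_BITS bits, with the
 * engine instance and class in the next two byte-wide fields. Non-engine
 * events start at __I915_PMU_OTHER(0), above every possible engine encoding.
 */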

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}

static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = pmu->enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle, per-engine counters do not need to be
	 * running, so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, if software busyness tracking is available, we do not need
	 * the timer for the I915_SAMPLE_BUSY counter.
	 */
	else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}

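/*
 * RC6 residency is the accumulated time the GT has spent in an RC6
 * power-saving state. On parts with the deeper RC6p/RC6pp states the
 * individual hardware counters are summed into a single total.
 */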
static u64 __get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	u64 val;

	val = intel_rc6_residency_ns(&gt->rc6,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);

	return val;
}

#if IS_ENABLED(CONFIG_PM)

static inline s64 ktime_since(const ktime_t kt)
{
	return ktime_to_ns(ktime_sub(ktime_get(), kt));
}

static u64 get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	bool awake = false;
	u64 val;

	if (intel_gt_pm_get_if_awake(gt)) {
		val = __get_rc6(gt);
		intel_gt_pm_put_async(gt);
		awake = true;
	}

	spin_lock_irqsave(&pmu->lock, flags);

	if (awake) {
		pmu->sample[__I915_SAMPLE_RC6].cur = val;
	} else {
		/*
		 * We think we are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		val = ktime_since(pmu->sleep_last);
		val += pmu->sample[__I915_SAMPLE_RC6].cur;
	}

	if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
		val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
	else
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;

	spin_unlock_irqrestore(&pmu->lock, flags);

	return val;
}

static void park_rc6(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);

	pmu->sleep_last = ktime_get();
}

#else

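/*
 * Without CONFIG_PM the device is never runtime suspended while the driver
 * is loaded, so there is no suspended period to estimate over and the
 * hardware residency counters can simply be read directly.
 */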
static u64 get_rc6(struct intel_gt *gt)
{
	return __get_rc6(gt);
}

static void park_rc6(struct drm_i915_private *i915) {}

#endif

static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
		pmu->timer_enabled = true;
		pmu->timer_last = ktime_get();
		hrtimer_start_range_ns(&pmu->timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	park_rc6(i915);

	/*
	 * Signal the sampling timer to stop if only engine events are enabled
	 * and the GPU went idle.
	 */
	pmu->timer_enabled = pmu_needs_timer(pmu, false);

	spin_unlock_irq(&pmu->lock);
}

void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	/*
	 * Re-enable the sampling timer when the GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	spin_unlock_irq(&pmu->lock);
}

static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}

static bool exclusive_mmio_access(const struct drm_i915_private *i915)
{
	/*
	 * We have to avoid concurrent mmio cache line access on gen7 or
	 * risk a machine hang. For a fun history lesson dig out the old
	 * userspace intel_gpu_top and run it on Ivybridge or Haswell!
	 */
	return IS_GEN(i915, 7);
}

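/*
 * Note the statistical nature of the sampling below: each timer tick
 * attributes the entire elapsed period to whichever ring states (wait,
 * semaphore, busy) are observed at that instant, so the counters converge
 * on the true busyness only over many periods.
 */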
static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
{
	struct intel_engine_pmu *pmu = &engine->pmu;
	bool busy;
	u32 val;

	val = ENGINE_READ_FW(engine, RING_CTL);
	if (val == 0) /* powerwell off => engine idle */
		return;

	if (val & RING_WAIT)
		add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
	if (val & RING_WAIT_SEMAPHORE)
		add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

	/* No need to sample when busy stats are supported. */
	if (intel_engine_supports_stats(engine))
		return;

	/*
	 * While waiting on a semaphore or event, MI_MODE reports the
	 * ring as idle. However, previously using the seqno, and with
	 * execlists sampling, we account for the ring waiting as the
	 * engine being busy. Therefore, we record the sample as being
	 * busy if either waiting or !idle.
	 */
	busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
	if (!busy) {
		val = ENGINE_READ_FW(engine, RING_MI_MODE);
		busy = !(val & MODE_IDLE);
	}
	if (busy)
		add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
}

static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long flags;

	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!intel_gt_pm_is_awake(gt))
		return;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_pm_get_if_awake(engine))
			continue;

		if (exclusive_mmio_access(i915)) {
			spin_lock_irqsave(&engine->uncore->lock, flags);
			engine_sample(engine, period_ns);
			spin_unlock_irqrestore(&engine->uncore->lock, flags);
		} else {
			engine_sample(engine, period_ns);
		}

		intel_engine_pm_put_async(engine);
	}
}

static void
add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
{
	sample->cur += mul_u32_u32(val, mul);
}

static bool frequency_sampling_enabled(struct i915_pmu *pmu)
{
	return pmu->enable &
	       (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
}

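/*
 * A note on units: frequency_sample() below accumulates MHz multiplied by
 * the sample period in microseconds. __i915_pmu_event_read() later divides
 * by USEC_PER_SEC, so the exported counter advances by the time-weighted
 * average frequency in MHz for every second of wall time, which perf then
 * presents via the "M" unit attribute.
 */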
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_rps *rps = &gt->rps;

	if (!frequency_sampling_enabled(pmu))
		return;

	/* Report 0/0 (actual/requested) frequency while parked. */
	if (!intel_gt_pm_get_if_awake(gt))
		return;

	if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		/*
		 * We take a quick peek here without using forcewake
		 * so that we don't perturb the system under observation
		 * (forcewake => !rc6 => increased power use). We expect
		 * that if the read fails because it is outside of the
		 * mmio power well, then it will return 0 -- in which
		 * case we assume the system is running at the intended
		 * frequency. Fortunately, the read should rarely fail!
		 */
		val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
		if (val)
			val = intel_rps_get_cagf(rps, val);
		else
			val = rps->cur_freq;

		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
				intel_gpu_freq(rps, val), period_ns / 1000);
	}

	if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
				intel_gpu_freq(rps, rps->cur_freq),
				period_ns / 1000);
	}

	intel_gt_pm_put_async(gt);
}

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_gt *gt = &i915->gt;
	unsigned int period_ns;
	ktime_t now;

	if (!READ_ONCE(pmu->timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
	pmu->timer_last = now;

	/*
	 * Strictly speaking the passed in period may not be 100% accurate for
	 * all internal calculations, since some amount of time can be spent on
	 * grabbing the forcewake. However the potential error from timer
	 * callback delay greatly dominates this so we keep it simple.
	 */
	engines_sample(gt, period_ns);
	frequency_sample(gt, period_ns);

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}

static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);

	drm_WARN_ON(&i915->drm, event->parent);
}

static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int
config_status(struct drm_i915_private *i915, u64 config)
{
	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		fallthrough;
	case I915_PMU_REQUESTED_FREQUENCY:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!HAS_RC6(i915))
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	return engine_event_status(engine, engine_event_sample(event));
}

static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent)
		event->destroy = i915_pmu_event_destroy;

	return 0;
}

static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			ktime_t unused;

			val = ktime_to_ns(intel_engine_get_busy_time(engine,
								     &unused));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(&i915->gt);
			break;
		}
	}

	return val;
}

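/*
 * Publish the new counter value with a lockless compare-and-exchange loop:
 * if another reader races and updates prev_count first, retry so that each
 * delta between successive snapshots is accounted exactly once.
 */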
static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}

static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	intel_wakeref_t wakeref;
	unsigned long flags;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	spin_lock_irqsave(&pmu->lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == ~0);

	if (pmu->enable_count[bit] == 0 &&
	    config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
		pmu->sleep_last = ktime_get();
	}

	pmu->enable |= BIT_ULL(bit);
	pmu->enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	/*
	 * For per-engine events the bitmask and reference counts
	 * are stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners, even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	spin_lock_irqsave(&pmu->lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--pmu->enable_count[bit] == 0) {
		pmu->enable &= ~BIT_ULL(bit);
		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);
}

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

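/*
 * event_idx is only meaningful for PMUs whose counters userspace can read
 * directly (e.g. via rdpmc); i915 counters are not, so report index 0 to
 * opt out of that fast path.
 */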
static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}

struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

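/*
 * The macro above uses a compound literal to define an anonymous one-element
 * array of struct i915_str_attribute, then yields a pointer to the struct
 * attribute embedded in its single entry, so each format attribute can be
 * declared inline in the table below.
 */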
static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}

static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}

static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}

	/* Initialize supported engine counters. */
	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->uabi_instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	pmu->i915_attr = i915_attr;
	pmu->pmu_attr = pmu_attr;

	return attr;

err:
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}

static void free_event_attributes(struct i915_pmu *pmu)
{
	struct attribute **attr_iter = pmu->events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(pmu->events_attr_group.attrs);
	kfree(pmu->i915_attr);
	kfree(pmu->pmu_attr);

	pmu->events_attr_group.attrs = NULL;
	pmu->i915_attr = NULL;
	pmu->pmu_attr = NULL;
}

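/*
 * i915 is an uncore PMU: its counters are device-wide rather than per-CPU,
 * so a single CPU is designated to own the perf context and is advertised
 * to userspace through the cpumask attribute. The hotplug callbacks below
 * keep that designation valid as CPUs come and go.
 */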
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}

static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	pmu->cpuhp.slot = slot;
	return 0;
}

static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);

	drm_WARN_ON(&i915->drm, pmu->cpuhp.slot == CPUHP_INVALID);
	drm_WARN_ON(&i915->drm, cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
	cpuhp_remove_multi_state(pmu->cpuhp.slot);
	pmu->cpuhp.slot = CPUHP_INVALID;
}

static bool is_igp(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	/* IGP is 0000:00:02.0 */
	return pci_domain_nr(pdev->bus) == 0 &&
	       pdev->bus->number == 0 &&
	       PCI_SLOT(pdev->devfn) == 2 &&
	       PCI_FUNC(pdev->devfn) == 0;
}

void i915_pmu_register(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;
	const struct attribute_group *attr_groups[] = {
		&i915_pmu_format_attr_group,
		&pmu->events_attr_group,
		&i915_pmu_cpumask_attr_group,
		NULL
	};

	int ret = -ENOMEM;

	if (INTEL_GEN(i915) <= 2) {
		drm_info(&i915->drm, "PMU not supported for this GPU.");
		return;
	}

	spin_lock_init(&pmu->lock);
	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pmu->timer.function = i915_sample;
	pmu->cpuhp.slot = CPUHP_INVALID;

	if (!is_igp(i915)) {
		pmu->name = kasprintf(GFP_KERNEL,
				      "i915_%s",
				      dev_name(i915->drm.dev));
		if (pmu->name) {
			/* tools/perf reserves colons as special. */
			strreplace((char *)pmu->name, ':', '_');
		}
	} else {
		pmu->name = "i915";
	}
	if (!pmu->name)
		goto err;

	pmu->events_attr_group.name = "events";
	pmu->events_attr_group.attrs = create_event_attributes(pmu);
	if (!pmu->events_attr_group.attrs)
		goto err_name;

	pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
					GFP_KERNEL);
	if (!pmu->base.attr_groups)
		goto err_attr;

	pmu->base.module	= THIS_MODULE;
	pmu->base.task_ctx_nr	= perf_invalid_context;
	pmu->base.event_init	= i915_pmu_event_init;
	pmu->base.add		= i915_pmu_event_add;
	pmu->base.del		= i915_pmu_event_del;
	pmu->base.start		= i915_pmu_event_start;
	pmu->base.stop		= i915_pmu_event_stop;
	pmu->base.read		= i915_pmu_event_read;
	pmu->base.event_idx	= i915_pmu_event_event_idx;

	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
	if (ret)
		goto err_groups;

	ret = i915_pmu_register_cpuhp_state(pmu);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&pmu->base);
err_groups:
	kfree(pmu->base.attr_groups);
err_attr:
	pmu->base.event_init = NULL;
	free_event_attributes(pmu);
err_name:
	if (!is_igp(i915))
		kfree(pmu->name);
err:
	drm_notice(&i915->drm, "Failed to register PMU!\n");
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	drm_WARN_ON(&i915->drm, pmu->enable);

	hrtimer_cancel(&pmu->timer);

	i915_pmu_unregister_cpuhp_state(pmu);

	perf_pmu_unregister(&pmu->base);
	pmu->base.event_init = NULL;
	kfree(pmu->base.attr_groups);
	if (!is_igp(i915))
		kfree(pmu->name);
	free_event_attributes(pmu);
}
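
/*
 * Userspace usage sketch (not part of this file). A minimal, hedged example
 * of reading one of the counters exposed above via perf_event_open(2); the
 * sysfs path and config value mirror the attributes registered by
 * i915_pmu_register(), but this is an illustrative sketch rather than a
 * supported tool:
 *
 *	#include <linux/perf_event.h>
 *	#include <drm/i915_drm.h>
 *	#include <sys/syscall.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct perf_event_attr attr = {};
 *		uint64_t val;
 *		int fd;
 *
 *		// The dynamic PMU type is published by perf_pmu_register();
 *		// for the integrated GPU the name is plain "i915".
 *		FILE *f = fopen("/sys/bus/event_source/devices/i915/type", "r");
 *		if (!f || fscanf(f, "%u", &attr.type) != 1)
 *			return 1;
 *		fclose(f);
 *
 *		attr.size = sizeof(attr);
 *		attr.config = I915_PMU_RC6_RESIDENCY;
 *
 *		// System-wide event; the cpu argument must be in the PMU's
 *		// cpumask (the designated reader from i915_pmu_cpu_online()).
 *		fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *		if (fd < 0)
 *			return 1;
 *
 *		read(fd, &val, sizeof(val));	// residency in nanoseconds
 *		printf("rc6: %llu ns\n", (unsigned long long)val);
 *		close(fd);
 *		return 0;
 *	}
 */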