/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
 */
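
/*
 * For reference, a minimal userspace sketch of opening a stream with the
 * uapi described above. The names come from include/uapi/drm/i915_drm.h;
 * the metrics set ID (1 here) is a hypothetical value normally discovered
 * via sysfs, the OA exponent (16) is arbitrary, and error handling is
 * elided. Without DRM_I915_PERF_PROP_CTX_HANDLE this opens a system-wide
 * stream and so is subject to the paranoid check described above:
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int open_oa_stream(int drm_fd)
 *	{
 *		uint64_t properties[] = {
 *			DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *			DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *			DRM_I915_PERF_PROP_OA_FORMAT,
 *				I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *			DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *		};
 *		struct drm_i915_perf_open_param param = {
 *			.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *			.num_properties = sizeof(properties) /
 *					  (2 * sizeof(uint64_t)),
 *			.properties_ptr = (uintptr_t)properties,
 *		};
 *
 *		// returns a new fd representing the metrics stream
 *		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *	}
 */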

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core perf infrastructure, but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration.  For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key, value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's current CPU-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say: we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics, these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based, the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 *   - As a side note on perf's grouping feature, there was also some concern
 *     that using PERF_FORMAT_GROUP as a way to pack together counter values
 *     would quite drastically inflate our sample sizes, which would likely
 *     lower the effective sampling resolutions we could use when the available
 *     memory bandwidth is limited.
 *
 *     With the OA unit's report formats, counters are packed together as 32
 *     or 40bit values, with the largest report size being 256 bytes.
 *
 *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *     documented ordering to the values, implying PERF_FORMAT_ID must also be
 *     used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality, we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time.  The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example, Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.  Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and made our
 *   locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_perf.h"
/* HW requires this to be a power of two, between 128k and 16M, though the
 * driver is currently designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
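
/*
 * A worked example of the modular arithmetic (illustrative, not from the
 * PRM): with OA_BUFFER_SIZE = 16M, head = 0xffff80 and tail = 0x100,
 * OA_TAKEN(0x100, 0xffff80) = (0x100 - 0xffff80) & 0xffffff = 0x180,
 * i.e. 384 bytes of reports straddling the end of the circular buffer.
 */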

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
 * redundant read() attempts.
 *
 * We work around this issue in oa_buffer_check_unlocked() by reading the
 * reports in the OA buffer, starting from the tail reported by the HW, until
 * we find a report with its first 2 dwords not 0, meaning its previous report
 * is completely in memory and ready to be read. Those dwords are also set to 0
 * once read and the whole buffer is cleared upon OA buffer initialization. The
 * first dword is the reason for this report while the second is the timestamp,
 * making the chances of having those 2 fields at 0 fairly unlikely. A more
 * detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
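
/*
 * The OA unit raises a periodic report each time the selected timestamp bit
 * toggles, so the period follows from the exponent roughly as sketched below
 * (a sketch of the relationship, not the driver's actual helper; the 12.5MHz
 * timestamp frequency is the Haswell value used as an example):
 *
 *	static u64 oa_exponent_to_period_ns(u64 timestamp_hz, int exponent)
 *	{
 *		// bit 'exponent' toggles every 2^(exponent + 1) ticks
 *		return div64_u64(NSEC_PER_SEC * (2ULL << exponent),
 *				 timestamp_hz);
 *	}
 *
 * e.g. a 12.5MHz timestamp and exponent 0 give 2 / 12.5e6 = 160ns, the
 * fastest rate mentioned below, and each increment doubles the period.
 */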

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};
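
/*
 * The power-of-two report sizes above are what make tail alignment a simple
 * mask (see oa_buffer_check_unlocked()). A worked example: with the 256 byte
 * A45_B8_C8 format, ~(256 - 1) = 0xffffff00, so a raw HW tail of 0x1234 is
 * aligned down to 0x1200, a whole number of reports into the buffer.
 */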

static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 *                  data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and determine
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
 * object.
 *
 * Note: it's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, since the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	if (hw_tail == stream->oa_buffer.aging_tail &&
	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		/* If the HW tail hasn't moved since the last check and the HW
		 * tail has been aging for long enough, declare it the new
		 * tail.
		 */
		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
	} else {
		u32 head, tail, aged_tail;

		/* NB: The head we observe here might effectively be a little
		 * out of date. If a read() is in progress, the head could be
		 * anywhere between this head and stream->oa_buffer.tail.
		 */
		head = stream->oa_buffer.head - gtt_offset;
		aged_tail = stream->oa_buffer.tail - gtt_offset;

		hw_tail -= gtt_offset;
		tail = hw_tail;

		/* Walk the stream backward until we find a report with dword 0
		 * & 1 not at 0. Since the circular buffer pointers progress by
		 * increments of 64 bytes and reports can be up to 256
		 * bytes long, we can't tell whether a report has fully landed
		 * in memory before the first 2 dwords of the following report
		 * have effectively landed.
		 *
		 * This is assuming that the writes of the OA unit land in
		 * memory in the order they were written to.
		 * If not : (╯°□°)╯︵ ┻━┻
		 */
		while (OA_TAKEN(tail, aged_tail) >= report_size) {
			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);

			if (report32[0] != 0 || report32[1] != 0)
				break;

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		if (OA_TAKEN(hw_tail, tail) > report_size &&
		    __ratelimit(&stream->perf->tail_pointer_race))
			DRM_NOTE("unlanded report(s) head=0x%x "
				 "tail=0x%x hw_tail=0x%x\n",
				 head, tail, hw_tail);

		stream->oa_buffer.tail = gtt_offset + tail;
		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
			  stream->oa_buffer.head - gtt_offset) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}
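
/*
 * On the userspace side, the stream fd then yields a sequence of these
 * records. A minimal sketch of consuming them (the buffer size and the
 * handling here are illustrative only; names are from
 * include/uapi/drm/i915_drm.h):
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <drm/i915_drm.h>
 *
 *	static void drain_stream(int stream_fd)
 *	{
 *		uint8_t buf[4096];
 *		ssize_t len = read(stream_fd, buf, sizeof(buf));
 *		ssize_t off = 0;
 *
 *		while (off < len) {
 *			struct drm_i915_perf_record_header hdr;
 *
 *			memcpy(&hdr, buf + off, sizeof(hdr));
 *			if (hdr.type == DRM_I915_PERF_RECORD_SAMPLE) {
 *				// the raw OA report follows the header when
 *				// DRM_I915_PERF_PROP_SAMPLE_OA was requested
 *			}
 *			off += hdr.size;
 *		}
 *	}
 */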

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (IS_GEN(stream->perf->i915, 12) ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));
		if (reason == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    INTEL_GEN(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = IS_GEN(stream->perf->i915, 12) ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		intel_uncore_write(uncore, oastatus_reg,
				   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}
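
/*
 * From userspace the wake-up above is typically consumed by polling the
 * stream fd; a minimal sketch (the 1000ms timeout is illustrative only):
 *
 *	#include <poll.h>
 *
 *	static int wait_for_oa_data(int stream_fd)
 *	{
 *		struct pollfd pfd = {
 *			.fd = stream_fd,
 *			.events = POLLIN,
 *		};
 *
 *		// > 0 with POLLIN set once the hrtimer callback has seen
 *		// data in the OA buffer
 *		return poll(&pfd, 1, 1000);
 *	}
 */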

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	int err;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		/*
		 * As the ID is the gtt offset of the context's vma we
		 * pin the vma to ensure the ID remains fixed.
		 */
		err = intel_context_pin(ce);
		if (err == 0) {
			stream->pinned_ctx = ce;
			break;
		}
	}
	i915_gem_context_unlock_engines(ctx);

	return stream->pinned_ctx;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	switch (INTEL_GEN(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
	case 10:
		if (intel_engine_in_execlists_submission_mode(ce->engine)) {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		} else {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id = ce->lrc.lrca >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		}
		break;

	case 11:
	case 12: {
		stream->specific_ctx_id_mask =
			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		/*
		 * Pick an unused context id
		 * 0 - BITS_PER_LONG are used by other contexts
		 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
		 */
		stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		break;
	}

	default:
		MISSING_CASE(INTEL_GEN(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id;

	drm_dbg(&stream->perf->i915->drm,
		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
		stream->specific_ctx_id,
		stream->specific_ctx_id_mask);

	return 0;
}
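
/*
 * A worked example of the Gen11/12 case above, assuming the
 * GEN11_SW_CTX_ID_SHIFT/WIDTH values of 37/11 from intel_lrc_reg.h: OA
 * reports carry the upper 32 bits of the context descriptor, so the mask is
 * ((1 << 11) - 1) << (37 - 32) = 0xffe0, and with the idle context owning
 * 0x7ff the chosen unused ID becomes (0x7ff - 1) << 5 = 0xffc0.
 */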

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&stream->pinned_ctx);
	if (ce) {
		ce->tag = 0; /* recomputed on next submission after parking */
		intel_context_unpin(ce);
	}

	stream->specific_ctx_id = INVALID_CTX_ID;
	stream->specific_ctx_id_mask = 0;
}
1324
1325static void
1326free_oa_buffer(struct i915_perf_stream *stream)
1327{
1328	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
1329				   I915_VMA_RELEASE_MAP);
1330
1331	stream->oa_buffer.vaddr = NULL;
1332}
1333
1334static void
1335free_oa_configs(struct i915_perf_stream *stream)
1336{
1337	struct i915_oa_config_bo *oa_bo, *tmp;
1338
1339	i915_oa_config_put(stream->oa_config);
1340	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
1341		free_oa_config_bo(oa_bo);
1342}
1343
1344static void
1345free_noa_wait(struct i915_perf_stream *stream)
1346{
1347	i915_vma_unpin_and_release(&stream->noa_wait, 0);
1348}
1349
1350static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1351{
1352	struct i915_perf *perf = stream->perf;
1353
1354	BUG_ON(stream != perf->exclusive_stream);
1355
1356	/*
1357	 * Unset exclusive_stream first, it will be checked while disabling
1358	 * the metric set on gen8+.
1359	 *
1360	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
1361	 */
1362	WRITE_ONCE(perf->exclusive_stream, NULL);
1363	perf->ops.disable_metric_set(stream);
1364
1365	free_oa_buffer(stream);
1366
1367	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
1368	intel_engine_pm_put(stream->engine);
1369
1370	if (stream->ctx)
1371		oa_put_render_ctx_id(stream);
1372
1373	free_oa_configs(stream);
1374	free_noa_wait(stream);
1375
1376	if (perf->spurious_report_rs.missed) {
1377		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
1378			 perf->spurious_report_rs.missed);
1379	}
1380}
1381
1382static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
1383{
1384	struct intel_uncore *uncore = stream->uncore;
1385	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1386	unsigned long flags;
1387
1388	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1389
1390	/* Pre-DevBDW: OABUFFER must be set with counters off,
1391	 * before OASTATUS1, but after OASTATUS2
1392	 */
1393	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
1394			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
1395	stream->oa_buffer.head = gtt_offset;
1396
1397	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
1398
1399	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
1400			   gtt_offset | OABUFFER_SIZE_16M);
1401
1402	/* Mark that we need updated tail pointers to read from... */
1403	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1404	stream->oa_buffer.tail = gtt_offset;
1405
1406	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1407
1408	/* On Haswell we have to track which OASTATUS1 flags we've
1409	 * already seen since they can't be cleared while periodic
1410	 * sampling is enabled.
1411	 */
1412	stream->perf->gen7_latched_oastatus1 = 0;
1413
1414	/* NB: although the OA buffer will initially be allocated
1415	 * zeroed via shmfs (and so this memset is redundant when
1416	 * first allocating), we may re-init the OA buffer, either
1417	 * when re-enabling a stream or in error/reset paths.
1418	 *
1419	 * The reason we clear the buffer for each re-init is for the
1420	 * sanity check in gen7_append_oa_reports() that looks at the
1421	 * report-id field to make sure it's non-zero which relies on
1422	 * the assumption that new reports are being written to zeroed
1423	 * memory...
1424	 */
1425	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1426}
1427
1428static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
1429{
1430	struct intel_uncore *uncore = stream->uncore;
1431	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1432	unsigned long flags;
1433
1434	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1435
1436	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
1437	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
1438	stream->oa_buffer.head = gtt_offset;
1439
1440	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
1441
1442	/*
1443	 * PRM says:
1444	 *
1445	 *  "This MMIO must be set before the OATAILPTR
1446	 *  register and after the OAHEADPTR register. This is
1447	 *  to enable proper functionality of the overflow
1448	 *  bit."
1449	 */
1450	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
1451		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1452	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1453
1454	/* Mark that we need updated tail pointers to read from... */
1455	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1456	stream->oa_buffer.tail = gtt_offset;
1457
1458	/*
1459	 * Reset state used to recognise context switches, affecting which
1460	 * reports we will forward to userspace while filtering for a single
1461	 * context.
1462	 */
1463	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1464
1465	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1466
1467	/*
1468	 * NB: although the OA buffer will initially be allocated
1469	 * zeroed via shmfs (and so this memset is redundant when
1470	 * first allocating), we may re-init the OA buffer, either
1471	 * when re-enabling a stream or in error/reset paths.
1472	 *
1473	 * The reason we clear the buffer for each re-init is for the
1474	 * sanity check in gen8_append_oa_reports() that looks at the
1475	 * reason field to make sure it's non-zero, which relies on
1476	 * the assumption that new reports are being written to zeroed
1477	 * memory...
1478	 */
1479	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1480}
1481
1482static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
1483{
1484	struct intel_uncore *uncore = stream->uncore;
1485	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1486	unsigned long flags;
1487
1488	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1489
1490	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
1491	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
1492			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
1493	stream->oa_buffer.head = gtt_offset;
1494
1495	/*
1496	 * PRM says:
1497	 *
1498	 *  "This MMIO must be set before the OATAILPTR
1499	 *  register and after the OAHEADPTR register. This is
1500	 *  to enable proper functionality of the overflow
1501	 *  bit."
1502	 */
1503	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
1504			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1505	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
1506			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);
1507
1508	/* Mark that we need updated tail pointers to read from... */
1509	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1510	stream->oa_buffer.tail = gtt_offset;
1511
1512	/*
1513	 * Reset state used to recognise context switches, affecting which
1514	 * reports we will forward to userspace while filtering for a single
1515	 * context.
1516	 */
1517	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1518
1519	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1520
1521	/*
1522	 * NB: although the OA buffer will initially be allocated
1523	 * zeroed via shmfs (and so this memset is redundant when
1524	 * first allocating), we may re-init the OA buffer, either
1525	 * when re-enabling a stream or in error/reset paths.
1526	 *
1527	 * The reason we clear the buffer for each re-init is for the
1528	 * sanity check in gen8_append_oa_reports() that looks at the
1529	 * reason field to make sure it's non-zero, which relies on
1530	 * the assumption that new reports are being written to zeroed
1531	 * memory...
1532	 */
1533	memset(stream->oa_buffer.vaddr, 0,
1534	       stream->oa_buffer.vma->size);
1535}
1536
1537static int alloc_oa_buffer(struct i915_perf_stream *stream)
1538{
1539	struct drm_i915_private *i915 = stream->perf->i915;
1540	struct drm_i915_gem_object *bo;
1541	struct i915_vma *vma;
1542	int ret;
1543
1544	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
1545		return -ENODEV;
1546
1547	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1548	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1549
1550	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
1551	if (IS_ERR(bo)) {
1552		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
1553		return PTR_ERR(bo);
1554	}
1555
1556	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
1557
1558	/* Pre-HSW required 512K alignment; HSW requires 16M */
1559	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
1560	if (IS_ERR(vma)) {
1561		ret = PTR_ERR(vma);
1562		goto err_unref;
1563	}
1564	stream->oa_buffer.vma = vma;
1565
1566	stream->oa_buffer.vaddr =
1567		i915_gem_object_pin_map(bo, I915_MAP_WB);
1568	if (IS_ERR(stream->oa_buffer.vaddr)) {
1569		ret = PTR_ERR(stream->oa_buffer.vaddr);
1570		goto err_unpin;
1571	}
1572
1573	return 0;
1574
1575err_unpin:
1576	__i915_vma_unpin(vma);
1577
1578err_unref:
1579	i915_gem_object_put(bo);
1580
1581	stream->oa_buffer.vaddr = NULL;
1582	stream->oa_buffer.vma = NULL;
1583
1584	return ret;
1585}
1586
1587static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
1588				  bool save, i915_reg_t reg, u32 offset,
1589				  u32 dword_count)
1590{
1591	u32 cmd;
1592	u32 d;
1593
1594	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
1595	cmd |= MI_SRM_LRM_GLOBAL_GTT;
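	/*
	 * Gen8+ variants of MI_STORE/LOAD_REGISTER_MEM take a 64-bit
	 * address and so are one dword longer; incrementing the opcode
	 * dword bumps its embedded length field to match.
	 */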
1596	if (INTEL_GEN(stream->perf->i915) >= 8)
1597		cmd++;
1598
1599	for (d = 0; d < dword_count; d++) {
1600		*cs++ = cmd;
1601		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
1602		*cs++ = intel_gt_scratch_offset(stream->engine->gt,
1603						offset) + 4 * d;
1604		*cs++ = 0;
1605	}
1606
1607	return cs;
1608}
1609
1610static int alloc_noa_wait(struct i915_perf_stream *stream)
1611{
1612	struct drm_i915_private *i915 = stream->perf->i915;
1613	struct drm_i915_gem_object *bo;
1614	struct i915_vma *vma;
1615	const u64 delay_ticks = 0xffffffffffffffff -
1616		i915_cs_timestamp_ns_to_ticks(i915, atomic64_read(&stream->perf->noa_programming_delay));
1617	const u32 base = stream->engine->mmio_base;
1618#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1619	u32 *batch, *ts0, *cs, *jump;
1620	int ret, i;
1621	enum {
1622		START_TS,
1623		NOW_TS,
1624		DELTA_TS,
1625		JUMP_PREDICATE,
1626		DELTA_TARGET,
1627		N_CS_GPR
1628	};
1629
1630	bo = i915_gem_object_create_internal(i915, 4096);
1631	if (IS_ERR(bo)) {
1632		drm_err(&i915->drm,
1633			"Failed to allocate NOA wait batchbuffer\n");
1634		return PTR_ERR(bo);
1635	}
1636
1637	/*
1638	 * We pin in GGTT because multiple OA config BOs will contain a jump
1639	 * to this address, and it needs to stay fixed for the lifetime of the
1640	 * i915/perf stream.
1641	 */
1642	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH);
1643	if (IS_ERR(vma)) {
1644		ret = PTR_ERR(vma);
1645		goto err_unref;
1646	}
1647
1648	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
1649	if (IS_ERR(batch)) {
1650		ret = PTR_ERR(batch);
1651		goto err_unpin;
1652	}
1653
1654	/* Save registers. */
1655	for (i = 0; i < N_CS_GPR; i++)
1656		cs = save_restore_register(
1657			stream, cs, true /* save */, CS_GPR(i),
1658			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1659	cs = save_restore_register(
1660		stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
1661		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1662
1663	/* First timestamp snapshot location. */
1664	ts0 = cs;
1665
1666	/*
1667	 * Initial snapshot of the timestamp register to implement the wait.
1668	 * We work with 32-bit values, so clear out the top 32 bits of the
1669	 * register because the ALU operates on 64-bit values.
1670	 */
1671	*cs++ = MI_LOAD_REGISTER_IMM(1);
1672	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
1673	*cs++ = 0;
1674	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1675	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1676	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
1677
1678	/*
1679	 * This is the location we're going to jump back into until the
1680	 * required amount of time has passed.
1681	 */
1682	jump = cs;
1683
1684	/*
1685	 * Take another snapshot of the timestamp register. Take care to
1686	 * clear the top 32 bits of CS_GPR(NOW_TS) as we're using it for
1687	 * other operations below.
1688	 */
1689	*cs++ = MI_LOAD_REGISTER_IMM(1);
1690	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
1691	*cs++ = 0;
1692	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1693	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1694	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
1695
1696	/*
1697	 * Compute the difference between the two timestamps and store the
1698	 * result in CS_GPR(DELTA_TS).
1699	 */
1700	*cs++ = MI_MATH(5);
1701	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
1702	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
1703	*cs++ = MI_MATH_SUB;
1704	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
1705	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1706
1707	/*
1708	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
1709	 * timestamp has rolled over the 32 bits) into the predicate register
1710	 * to be used for the predicated jump.
1711	 */
1712	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1713	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1714	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1715
1716	/* Restart from the beginning if we had timestamps roll over. */
1717	*cs++ = (INTEL_GEN(i915) < 8 ?
1718		 MI_BATCH_BUFFER_START :
1719		 MI_BATCH_BUFFER_START_GEN8) |
1720		MI_BATCH_PREDICATE;
1721	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
1722	*cs++ = 0;
1723
1724	/*
1725	 * Now take the difference between the two timestamps and add it to:
1726	 *      ((1 << 64) - 1) - delay_in_ticks
1727	 *
1728	 * When the addition sets the Carry Flag, the elapsed time is longer
1729	 * than the expected delay and we can exit the wait loop.
1730	 */
1731	*cs++ = MI_LOAD_REGISTER_IMM(2);
1732	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
1733	*cs++ = lower_32_bits(delay_ticks);
1734	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
1735	*cs++ = upper_32_bits(delay_ticks);
1736
1737	*cs++ = MI_MATH(4);
1738	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
1739	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
1740	*cs++ = MI_MATH_ADD;
1741	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1742
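	/*
	 * Worked example of the wait arithmetic (illustrative only): for a
	 * desired delay of 100 ticks, delay_ticks == (1 << 64) - 101. With
	 * DELTA_TS == 101 the addition above carries out of 64 bits, so
	 * STOREINV leaves JUMP_PREDICATE zero and the predicated jump below
	 * is not taken, ending the wait. With DELTA_TS == 50 there is no
	 * carry, JUMP_PREDICATE is non-zero and we jump back for another
	 * timestamp snapshot.
	 */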
1743	*cs++ = MI_ARB_CHECK;
1744
1745	/*
1746	 * Transfer the result into the predicate register to be used for the
1747	 * predicated jump.
1748	 */
1749	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1750	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1751	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1752
1753	/* Predicate the jump. */
1754	*cs++ = (INTEL_GEN(i915) < 8 ?
1755		 MI_BATCH_BUFFER_START :
1756		 MI_BATCH_BUFFER_START_GEN8) |
1757		MI_BATCH_PREDICATE;
1758	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
1759	*cs++ = 0;
1760
1761	/* Restore registers. */
1762	for (i = 0; i < N_CS_GPR; i++)
1763		cs = save_restore_register(
1764			stream, cs, false /* restore */, CS_GPR(i),
1765			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1766	cs = save_restore_register(
1767		stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
1768		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1769
1770	/* And return to the ring. */
1771	*cs++ = MI_BATCH_BUFFER_END;
1772
1773	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
1774
1775	i915_gem_object_flush_map(bo);
1776	__i915_gem_object_release_map(bo);
1777
1778	stream->noa_wait = vma;
1779	return 0;
1780
1781err_unpin:
1782	i915_vma_unpin_and_release(&vma, 0);
1783err_unref:
1784	i915_gem_object_put(bo);
1785	return ret;
1786}
1787
1788static u32 *write_cs_mi_lri(u32 *cs,
1789			    const struct i915_oa_reg *reg_data,
1790			    u32 n_regs)
1791{
1792	u32 i;
1793
1794	for (i = 0; i < n_regs; i++) {
1795		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
1796			u32 n_lri = min_t(u32,
1797					  n_regs - i,
1798					  MI_LOAD_REGISTER_IMM_MAX_REGS);
1799
1800			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
1801		}
1802		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
1803		*cs++ = reg_data[i].value;
1804	}
1805
1806	return cs;
1807}
1808
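/*
 * Each MI_LOAD_REGISTER_IMM packet is one header dword plus an
 * (offset, value) dword pair per register, and a single packet can load at
 * most MI_LOAD_REGISTER_IMM_MAX_REGS registers. For example, assuming the
 * current limit of 126 registers per packet, 130 registers cost
 * DIV_ROUND_UP(130, 126) == 2 headers plus 130 * 2 == 260 dwords, i.e.
 * 262 dwords in total.
 */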
1809static int num_lri_dwords(int num_regs)
1810{
1811	int count = 0;
1812
1813	if (num_regs > 0) {
1814		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
1815		count += num_regs * 2;
1816	}
1817
1818	return count;
1819}
1820
1821static struct i915_oa_config_bo *
1822alloc_oa_config_buffer(struct i915_perf_stream *stream,
1823		       struct i915_oa_config *oa_config)
1824{
1825	struct drm_i915_gem_object *obj;
1826	struct i915_oa_config_bo *oa_bo;
1827	size_t config_length = 0;
1828	u32 *cs;
1829	int err;
1830
1831	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
1832	if (!oa_bo)
1833		return ERR_PTR(-ENOMEM);
1834
1835	config_length += num_lri_dwords(oa_config->mux_regs_len);
1836	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
1837	config_length += num_lri_dwords(oa_config->flex_regs_len);
1838	config_length += 3; /* MI_BATCH_BUFFER_START */
1839	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
1840
1841	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
1842	if (IS_ERR(obj)) {
1843		err = PTR_ERR(obj);
1844		goto err_free;
1845	}
1846
1847	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
1848	if (IS_ERR(cs)) {
1849		err = PTR_ERR(cs);
1850		goto err_oa_bo;
1851	}
1852
1853	cs = write_cs_mi_lri(cs,
1854			     oa_config->mux_regs,
1855			     oa_config->mux_regs_len);
1856	cs = write_cs_mi_lri(cs,
1857			     oa_config->b_counter_regs,
1858			     oa_config->b_counter_regs_len);
1859	cs = write_cs_mi_lri(cs,
1860			     oa_config->flex_regs,
1861			     oa_config->flex_regs_len);
1862
1863	/* Jump into the active wait. */
1864	*cs++ = (INTEL_GEN(stream->perf->i915) < 8 ?
1865		 MI_BATCH_BUFFER_START :
1866		 MI_BATCH_BUFFER_START_GEN8);
1867	*cs++ = i915_ggtt_offset(stream->noa_wait);
1868	*cs++ = 0;
1869
1870	i915_gem_object_flush_map(obj);
1871	__i915_gem_object_release_map(obj);
1872
1873	oa_bo->vma = i915_vma_instance(obj,
1874				       &stream->engine->gt->ggtt->vm,
1875				       NULL);
1876	if (IS_ERR(oa_bo->vma)) {
1877		err = PTR_ERR(oa_bo->vma);
1878		goto err_oa_bo;
1879	}
1880
1881	oa_bo->oa_config = i915_oa_config_get(oa_config);
1882	llist_add(&oa_bo->node, &stream->oa_config_bos);
1883
1884	return oa_bo;
1885
1886err_oa_bo:
1887	i915_gem_object_put(obj);
1888err_free:
1889	kfree(oa_bo);
1890	return ERR_PTR(err);
1891}
1892
1893static struct i915_vma *
1894get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
1895{
1896	struct i915_oa_config_bo *oa_bo;
1897
1898	/*
1899	 * Look for the buffer in the already allocated BOs attached
1900	 * to the stream.
1901	 */
1902	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
1903		if (oa_bo->oa_config == oa_config &&
1904		    memcmp(oa_bo->oa_config->uuid,
1905			   oa_config->uuid,
1906			   sizeof(oa_config->uuid)) == 0)
1907			goto out;
1908	}
1909
1910	oa_bo = alloc_oa_config_buffer(stream, oa_config);
1911	if (IS_ERR(oa_bo))
1912		return ERR_CAST(oa_bo);
1913
1914out:
1915	return i915_vma_get(oa_bo->vma);
1916}
1917
1918static int
1919emit_oa_config(struct i915_perf_stream *stream,
1920	       struct i915_oa_config *oa_config,
1921	       struct intel_context *ce,
1922	       struct i915_active *active)
1923{
1924	struct i915_request *rq;
1925	struct i915_vma *vma;
1926	int err;
1927
1928	vma = get_oa_vma(stream, oa_config);
1929	if (IS_ERR(vma))
1930		return PTR_ERR(vma);
1931
1932	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1933	if (err)
1934		goto err_vma_put;
1935
1936	intel_engine_pm_get(ce->engine);
1937	rq = i915_request_create(ce);
1938	intel_engine_pm_put(ce->engine);
1939	if (IS_ERR(rq)) {
1940		err = PTR_ERR(rq);
1941		goto err_vma_unpin;
1942	}
1943
1944	if (!IS_ERR_OR_NULL(active)) {
1945		/* After all individual context modifications */
1946		err = i915_request_await_active(rq, active,
1947						I915_ACTIVE_AWAIT_ACTIVE);
1948		if (err)
1949			goto err_add_request;
1950
1951		err = i915_active_add_request(active, rq);
1952		if (err)
1953			goto err_add_request;
1954	}
1955
1956	i915_vma_lock(vma);
1957	err = i915_request_await_object(rq, vma->obj, 0);
1958	if (!err)
1959		err = i915_vma_move_to_active(vma, rq, 0);
1960	i915_vma_unlock(vma);
1961	if (err)
1962		goto err_add_request;
1963
1964	err = rq->engine->emit_bb_start(rq,
1965					vma->node.start, 0,
1966					I915_DISPATCH_SECURE);
1967	if (err)
1968		goto err_add_request;
1969
1970err_add_request:
1971	i915_request_add(rq);
1972err_vma_unpin:
1973	i915_vma_unpin(vma);
1974err_vma_put:
1975	i915_vma_put(vma);
1976	return err;
1977}
1978
1979static struct intel_context *oa_context(struct i915_perf_stream *stream)
1980{
1981	return stream->pinned_ctx ?: stream->engine->kernel_context;
1982}
1983
1984static int
1985hsw_enable_metric_set(struct i915_perf_stream *stream,
1986		      struct i915_active *active)
1987{
1988	struct intel_uncore *uncore = stream->uncore;
1989
1990	/*
1991	 * PRM:
1992	 *
1993	 * OA unit is using “crclk” for its functionality. When trunk
1994	 * level clock gating takes place, OA clock would be gated,
1995	 * unable to count the events from non-render clock domain.
1996	 * Render clock gating must be disabled when OA is enabled to
1997	 * count the events from non-render domain. Unit level clock
1998	 * gating for RCS should also be disabled.
1999	 */
2000	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2001			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
2002	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2003			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2004
2005	return emit_oa_config(stream,
2006			      stream->oa_config, oa_context(stream),
2007			      active);
2008}
2009
2010static void hsw_disable_metric_set(struct i915_perf_stream *stream)
2011{
2012	struct intel_uncore *uncore = stream->uncore;
2013
2014	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2015			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
2016	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2017			 0, GEN7_DOP_CLOCK_GATE_ENABLE);
2018
2019	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2020}
2021
2022static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
2023			      i915_reg_t reg)
2024{
2025	u32 mmio = i915_mmio_reg_offset(reg);
2026	int i;
2027
2028	/*
2029	 * This arbitrary default will select the 'EU FPU0 Pipeline
2030	 * Active' event. In the future it's anticipated that there
2031	 * will be an explicit 'No Event' we can select, but not yet...
2032	 */
2033	if (!oa_config)
2034		return 0;
2035
2036	for (i = 0; i < oa_config->flex_regs_len; i++) {
2037		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
2038			return oa_config->flex_regs[i].value;
2039	}
2040
2041	return 0;
2042}
2043/*
2044 * NB: It must always remain pointer safe to run this even if the OA unit
2045 * has been disabled.
2046 *
2047 * It's fine to put out-of-date values into these per-context registers
2048 * in the case that the OA unit has been disabled.
2049 */
2050static void
2051gen8_update_reg_state_unlocked(const struct intel_context *ce,
2052			       const struct i915_perf_stream *stream)
2053{
2054	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2055	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2056	/* The MMIO offsets for Flex EU registers aren't contiguous */
2057	i915_reg_t flex_regs[] = {
2058		EU_PERF_CNTL0,
2059		EU_PERF_CNTL1,
2060		EU_PERF_CNTL2,
2061		EU_PERF_CNTL3,
2062		EU_PERF_CNTL4,
2063		EU_PERF_CNTL5,
2064		EU_PERF_CNTL6,
2065	};
2066	u32 *reg_state = ce->lrc_reg_state;
2067	int i;
2068
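	/*
	 * The saved register state is laid out as (offset, value) dword
	 * pairs in MI_LOAD_REGISTER_IMM form, so a register's value lives
	 * one dword after its MMIO offset - hence the "+ 1" below.
	 */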
2069	reg_state[ctx_oactxctrl + 1] =
2070		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2071		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2072		GEN8_OA_COUNTER_RESUME;
2073
2074	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
2075		reg_state[ctx_flexeu0 + i * 2 + 1] =
2076			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
2077}
2078
2079struct flex {
2080	i915_reg_t reg;
2081	u32 offset;
2082	u32 value;
2083};
2084
2085static int
2086gen8_store_flex(struct i915_request *rq,
2087		struct intel_context *ce,
2088		const struct flex *flex, unsigned int count)
2089{
2090	u32 offset;
2091	u32 *cs;
2092
2093	cs = intel_ring_begin(rq, 4 * count);
2094	if (IS_ERR(cs))
2095		return PTR_ERR(cs);
2096
2097	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
2098	do {
2099		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
2100		*cs++ = offset + flex->offset * sizeof(u32);
2101		*cs++ = 0;
2102		*cs++ = flex->value;
2103	} while (flex++, --count);
2104
2105	intel_ring_advance(rq, cs);
2106
2107	return 0;
2108}
2109
2110static int
2111gen8_load_flex(struct i915_request *rq,
2112	       struct intel_context *ce,
2113	       const struct flex *flex, unsigned int count)
2114{
2115	u32 *cs;
2116
2117	GEM_BUG_ON(!count || count > 63);
2118
2119	cs = intel_ring_begin(rq, 2 * count + 2);
2120	if (IS_ERR(cs))
2121		return PTR_ERR(cs);
2122
2123	*cs++ = MI_LOAD_REGISTER_IMM(count);
2124	do {
2125		*cs++ = i915_mmio_reg_offset(flex->reg);
2126		*cs++ = flex->value;
2127	} while (flex++, --count);
2128	*cs++ = MI_NOOP;
2129
2130	intel_ring_advance(rq, cs);
2131
2132	return 0;
2133}
2134
2135static int gen8_modify_context(struct intel_context *ce,
2136			       const struct flex *flex, unsigned int count)
2137{
2138	struct i915_request *rq;
2139	int err;
2140
2141	rq = intel_engine_create_kernel_request(ce->engine);
2142	if (IS_ERR(rq))
2143		return PTR_ERR(rq);
2144
2145	/* Serialise with the remote context */
2146	err = intel_context_prepare_remote_request(ce, rq);
2147	if (err == 0)
2148		err = gen8_store_flex(rq, ce, flex, count);
2149
2150	i915_request_add(rq);
2151	return err;
2152}
2153
2154static int
2155gen8_modify_self(struct intel_context *ce,
2156		 const struct flex *flex, unsigned int count,
2157		 struct i915_active *active)
2158{
2159	struct i915_request *rq;
2160	int err;
2161
2162	intel_engine_pm_get(ce->engine);
2163	rq = i915_request_create(ce);
2164	intel_engine_pm_put(ce->engine);
2165	if (IS_ERR(rq))
2166		return PTR_ERR(rq);
2167
2168	if (!IS_ERR_OR_NULL(active)) {
2169		err = i915_active_add_request(active, rq);
2170		if (err)
2171			goto err_add_request;
2172	}
2173
2174	err = gen8_load_flex(rq, ce, flex, count);
2175	if (err)
2176		goto err_add_request;
2177
2178err_add_request:
2179	i915_request_add(rq);
2180	return err;
2181}
2182
2183static int gen8_configure_context(struct i915_gem_context *ctx,
2184				  struct flex *flex, unsigned int count)
2185{
2186	struct i915_gem_engines_iter it;
2187	struct intel_context *ce;
2188	int err = 0;
2189
2190	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2191		GEM_BUG_ON(ce == ce->engine->kernel_context);
2192
2193		if (ce->engine->class != RENDER_CLASS)
2194			continue;
2195
2196		/* Otherwise OA settings will be set upon first use */
2197		if (!intel_context_pin_if_active(ce))
2198			continue;
2199
2200		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
2201		err = gen8_modify_context(ce, flex, count);
2202
2203		intel_context_unpin(ce);
2204		if (err)
2205			break;
2206	}
2207	i915_gem_context_unlock_engines(ctx);
2208
2209	return err;
2210}
2211
2212static int gen12_configure_oar_context(struct i915_perf_stream *stream,
2213				       struct i915_active *active)
2214{
2215	int err;
2216	struct intel_context *ce = stream->pinned_ctx;
2217	u32 format = stream->oa_buffer.format;
2218	struct flex regs_context[] = {
2219		{
2220			GEN8_OACTXCONTROL,
2221			stream->perf->ctx_oactxctrl_offset + 1,
2222			active ? GEN8_OA_COUNTER_RESUME : 0,
2223		},
2224	};
2225	/* Offsets in regs_lri are not used since this configuration is only
2226	 * applied using LRI. Initialize the correct offsets for posterity.
2227	 */
2228#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2229	struct flex regs_lri[] = {
2230		{
2231			GEN12_OAR_OACONTROL,
2232			GEN12_OAR_OACONTROL_OFFSET + 1,
2233			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2234			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2235		},
2236		{
2237			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2238			CTX_CONTEXT_CONTROL,
2239			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2240				      active ?
2241				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2242				      0)
2243		},
2244	};
2245
2246	/* Modify the context image of the pinned context with regs_context */
2247	err = intel_context_lock_pinned(ce);
2248	if (err)
2249		return err;
2250
2251	err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
2252	intel_context_unlock_pinned(ce);
2253	if (err)
2254		return err;
2255
2256	/* Apply regs_lri using LRI with pinned context */
2257	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
2258}
2259
2260/*
2261 * Manages updating the per-context aspects of the OA stream
2262 * configuration across all contexts.
2263 *
2264 * The awkward consideration here is that OACTXCONTROL controls the
2265 * exponent for periodic sampling which is primarily used for system
2266 * wide profiling where we'd like a consistent sampling period even in
2267 * the face of context switches.
2268 *
2269 * Our approach of updating the register state context (as opposed to
2270 * say using a workaround batch buffer) ensures that the hardware
2271 * won't automatically reload an out-of-date timer exponent even
2272 * transiently before a WA BB could be parsed.
2273 *
2274 * This function needs to:
2275 * - Ensure the currently running context's per-context OA state is
2276 *   updated
2277 * - Ensure that all existing contexts will have the correct per-context
2278 *   OA state if they are scheduled for use.
2279 * - Ensure any new contexts will be initialized with the correct
2280 *   per-context OA state.
2281 *
2282 * Note: it's only the RCS/Render context that has any OA state.
2283 * Note: the first flex register passed must always be R_PWR_CLK_STATE
2284 */
2285static int
2286oa_configure_all_contexts(struct i915_perf_stream *stream,
2287			  struct flex *regs,
2288			  size_t num_regs,
2289			  struct i915_active *active)
2290{
2291	struct drm_i915_private *i915 = stream->perf->i915;
2292	struct intel_engine_cs *engine;
2293	struct i915_gem_context *ctx, *cn;
2294	int err;
2295
2296	lockdep_assert_held(&stream->perf->lock);
2297
2298	/*
2299	 * The OA register config is set up through the context image. This image
2300	 * might be written to by the GPU on context switch (in particular on
2301	 * lite-restore). This means we can't safely update a context's image
2302	 * if this context is scheduled/submitted to run on the GPU.
2303	 *
2304	 * We could emit the OA register config through the batch buffer, but
2305	 * this might leave a small interval of time where the OA unit is
2306	 * configured at an invalid sampling period.
2307	 *
2308	 * Note that since we emit all requests from a single ring, there
2309	 * is still an implicit global barrier here that may cause a high
2310	 * priority context to wait for an otherwise independent low priority
2311	 * context. Contexts idle at the time of reconfiguration are not
2312	 * trapped behind the barrier.
2313	 */
2314	spin_lock(&i915->gem.contexts.lock);
2315	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
2316		if (!kref_get_unless_zero(&ctx->ref))
2317			continue;
2318
2319		spin_unlock(&i915->gem.contexts.lock);
2320
2321		err = gen8_configure_context(ctx, regs, num_regs);
2322		if (err) {
2323			i915_gem_context_put(ctx);
2324			return err;
2325		}
2326
2327		spin_lock(&i915->gem.contexts.lock);
2328		list_safe_reset_next(ctx, cn, link);
2329		i915_gem_context_put(ctx);
2330	}
2331	spin_unlock(&i915->gem.contexts.lock);
2332
2333	/*
2334	 * After updating all other contexts, we need to modify ourselves.
2335	 * If we don't modify the kernel_context, we do not get events while
2336	 * idle.
2337	 */
2338	for_each_uabi_engine(engine, i915) {
2339		struct intel_context *ce = engine->kernel_context;
2340
2341		if (engine->class != RENDER_CLASS)
2342			continue;
2343
2344		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
2345
2346		err = gen8_modify_self(ce, regs, num_regs, active);
2347		if (err)
2348			return err;
2349	}
2350
2351	return 0;
2352}
2353
2354static int
2355gen12_configure_all_contexts(struct i915_perf_stream *stream,
2356			     const struct i915_oa_config *oa_config,
2357			     struct i915_active *active)
2358{
2359	struct flex regs[] = {
2360		{
2361			GEN8_R_PWR_CLK_STATE,
2362			CTX_R_PWR_CLK_STATE,
2363		},
2364	};
2365
2366	return oa_configure_all_contexts(stream,
2367					 regs, ARRAY_SIZE(regs),
2368					 active);
2369}
2370
2371static int
2372lrc_configure_all_contexts(struct i915_perf_stream *stream,
2373			   const struct i915_oa_config *oa_config,
2374			   struct i915_active *active)
2375{
2376	/* The MMIO offsets for Flex EU registers aren't contiguous */
2377	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2378#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
2379	struct flex regs[] = {
2380		{
2381			GEN8_R_PWR_CLK_STATE,
2382			CTX_R_PWR_CLK_STATE,
2383		},
2384		{
2385			GEN8_OACTXCONTROL,
2386			stream->perf->ctx_oactxctrl_offset + 1,
2387		},
2388		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
2389		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
2390		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
2391		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
2392		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
2393		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
2394		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
2395	};
2396#undef ctx_flexeuN
2397	int i;
2398
2399	regs[1].value =
2400		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2401		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2402		GEN8_OA_COUNTER_RESUME;
2403
2404	for (i = 2; i < ARRAY_SIZE(regs); i++)
2405		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2406
2407	return oa_configure_all_contexts(stream,
2408					 regs, ARRAY_SIZE(regs),
2409					 active);
2410}
2411
2412static int
2413gen8_enable_metric_set(struct i915_perf_stream *stream,
2414		       struct i915_active *active)
2415{
2416	struct intel_uncore *uncore = stream->uncore;
2417	struct i915_oa_config *oa_config = stream->oa_config;
2418	int ret;
2419
2420	/*
2421	 * We disable slice/unslice clock ratio change reports on SKL since
2422	 * they are too noisy. The HW generates a lot of redundant reports
2423	 * where the ratio hasn't really changed, causing a lot of redundant
2424	 * work for userspace processes and increasing the chances we'll hit buffer
2425	 * overruns.
2426	 *
2427	 * Although we don't currently use the 'disable overrun' OABUFFER
2428	 * feature, it's worth noting that clock ratio reports have to be
2429	 * disabled before that feature can be used, since the HW doesn't
2430	 * correctly block these reports.
2431	 *
2432	 * Currently none of the high-level metrics we have depend on knowing
2433	 * this ratio to normalize.
2434	 *
2435	 * Note: This register is not power context saved and restored, but
2436	 * that's OK considering that we disable RC6 while the OA unit is
2437	 * enabled.
2438	 *
2439	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2440	 * be read back from automatically triggered reports, as part of the
2441	 * RPT_ID field.
2442	 */
2443	if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) {
2444		intel_uncore_write(uncore, GEN8_OA_DEBUG,
2445				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2446						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2447	}
2448
2449	/*
2450	 * Update all contexts prior to writing the mux configurations, as we need
2451	 * to make sure all slices/subslices are ON before writing to NOA
2452	 * registers.
2453	 */
2454	ret = lrc_configure_all_contexts(stream, oa_config, active);
2455	if (ret)
2456		return ret;
2457
2458	return emit_oa_config(stream,
2459			      stream->oa_config, oa_context(stream),
2460			      active);
2461}
2462
2463static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2464{
2465	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2466			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
2467			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2468}
2469
2470static int
2471gen12_enable_metric_set(struct i915_perf_stream *stream,
2472			struct i915_active *active)
2473{
2474	struct intel_uncore *uncore = stream->uncore;
2475	struct i915_oa_config *oa_config = stream->oa_config;
2476	bool periodic = stream->periodic;
2477	u32 period_exponent = stream->period_exponent;
2478	int ret;
2479
2480	intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
2481			   /* Disable clk ratio reports, like previous Gens. */
2482			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2483					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2484			   /*
2485			    * If the user didn't require OA reports, instruct
2486			    * the hardware not to emit ctx switch reports.
2487			    */
2488			   oag_report_ctx_switches(stream));
2489
2490	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
2491			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2492			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2493			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2494			    : 0);
2495
2496	/*
2497	 * Update all contexts prior to writing the mux configurations, as we need
2498	 * to make sure all slices/subslices are ON before writing to NOA
2499	 * registers.
2500	 */
2501	ret = gen12_configure_all_contexts(stream, oa_config, active);
2502	if (ret)
2503		return ret;
2504
2505	/*
2506	 * For Gen12, performance counters are context
2507	 * saved/restored. Only enable it for the context that
2508	 * saved/restored. Only enable them for the context that
2509	 */
2510	if (stream->ctx) {
2511		ret = gen12_configure_oar_context(stream, active);
2512		if (ret)
2513			return ret;
2514	}
2515
2516	return emit_oa_config(stream,
2517			      stream->oa_config, oa_context(stream),
2518			      active);
2519}
2520
2521static void gen8_disable_metric_set(struct i915_perf_stream *stream)
2522{
2523	struct intel_uncore *uncore = stream->uncore;
2524
2525	/* Reset all contexts' slices/subslices configurations. */
2526	lrc_configure_all_contexts(stream, NULL, NULL);
2527
2528	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2529}
2530
2531static void gen10_disable_metric_set(struct i915_perf_stream *stream)
2532{
2533	struct intel_uncore *uncore = stream->uncore;
2534
2535	/* Reset all contexts' slices/subslices configurations. */
2536	lrc_configure_all_contexts(stream, NULL, NULL);
2537
2538	/* Make sure we disable noa to save power. */
2539	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2540}
2541
2542static void gen12_disable_metric_set(struct i915_perf_stream *stream)
2543{
2544	struct intel_uncore *uncore = stream->uncore;
2545
2546	/* Reset all contexts' slices/subslices configurations. */
2547	gen12_configure_all_contexts(stream, NULL, NULL);
2548
2549	/* Disable the context save/restore of the OAR counters */
2550	if (stream->ctx)
2551		gen12_configure_oar_context(stream, NULL);
2552
2553	/* Make sure we disable noa to save power. */
2554	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2555}
2556
2557static void gen7_oa_enable(struct i915_perf_stream *stream)
2558{
2559	struct intel_uncore *uncore = stream->uncore;
2560	struct i915_gem_context *ctx = stream->ctx;
2561	u32 ctx_id = stream->specific_ctx_id;
2562	bool periodic = stream->periodic;
2563	u32 period_exponent = stream->period_exponent;
2564	u32 report_format = stream->oa_buffer.format;
2565
2566	/*
2567	 * Reset buf pointers so we don't forward reports from before now.
2568	 *
2569	 * Think carefully if considering trying to avoid this, since it
2570	 * also ensures status flags and the buffer itself are cleared
2571	 * in error paths, and we have checks for invalid reports based
2572	 * on the assumption that certain fields are written to zeroed
2573	 * memory, which this helps maintain.
2574	 */
2575	gen7_init_oa_buffer(stream);
2576
2577	intel_uncore_write(uncore, GEN7_OACONTROL,
2578			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
2579			   (period_exponent <<
2580			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
2581			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
2582			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
2583			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
2584			   GEN7_OACONTROL_ENABLE);
2585}
2586
2587static void gen8_oa_enable(struct i915_perf_stream *stream)
2588{
2589	struct intel_uncore *uncore = stream->uncore;
2590	u32 report_format = stream->oa_buffer.format;
2591
2592	/*
2593	 * Reset buf pointers so we don't forward reports from before now.
2594	 *
2595	 * Think carefully if considering trying to avoid this, since it
2596	 * also ensures status flags and the buffer itself are cleared
2597	 * in error paths, and we have checks for invalid reports based
2598	 * on the assumption that certain fields are written to zeroed
2599	 * memory, which this helps maintain.
2600	 */
2601	gen8_init_oa_buffer(stream);
2602
2603	/*
2604	 * Note: we don't rely on the hardware to perform single context
2605	 * filtering and instead filter on the cpu based on the context-id
2606	 * field of reports
2607	 */
2608	intel_uncore_write(uncore, GEN8_OACONTROL,
2609			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
2610			   GEN8_OA_COUNTER_ENABLE);
2611}
2612
2613static void gen12_oa_enable(struct i915_perf_stream *stream)
2614{
2615	struct intel_uncore *uncore = stream->uncore;
2616	u32 report_format = stream->oa_buffer.format;
2617
2618	/*
2619	 * If we don't want OA reports from the OA buffer, then we don't even
2620	 * need to program the OAG unit.
2621	 */
2622	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
2623		return;
2624
2625	gen12_init_oa_buffer(stream);
2626
2627	intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
2628			   (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
2629			   GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
2630}
2631
2632/**
2633 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
2634 * @stream: An i915 perf stream opened for OA metrics
2635 *
2636 * [Re]enables hardware periodic sampling according to the period configured
2637 * when opening the stream. This also starts a hrtimer that will periodically
2638 * check for data in the circular OA buffer for notifying userspace (e.g.
2639 * during a read() or poll()).
2640 */
2641static void i915_oa_stream_enable(struct i915_perf_stream *stream)
2642{
2643	stream->pollin = false;
2644
2645	stream->perf->ops.oa_enable(stream);
2646
2647	if (stream->periodic)
2648		hrtimer_start(&stream->poll_check_timer,
2649			      ns_to_ktime(stream->poll_oa_period),
2650			      HRTIMER_MODE_REL_PINNED);
2651}
2652
2653static void gen7_oa_disable(struct i915_perf_stream *stream)
2654{
2655	struct intel_uncore *uncore = stream->uncore;
2656
2657	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
2658	if (intel_wait_for_register(uncore,
2659				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
2660				    50))
2661		drm_err(&stream->perf->i915->drm,
2662			"wait for OA to be disabled timed out\n");
2663}
2664
2665static void gen8_oa_disable(struct i915_perf_stream *stream)
2666{
2667	struct intel_uncore *uncore = stream->uncore;
2668
2669	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
2670	if (intel_wait_for_register(uncore,
2671				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
2672				    50))
2673		drm_err(&stream->perf->i915->drm,
2674			"wait for OA to be disabled timed out\n");
2675}
2676
2677static void gen12_oa_disable(struct i915_perf_stream *stream)
2678{
2679	struct intel_uncore *uncore = stream->uncore;
2680
2681	intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
2682	if (intel_wait_for_register(uncore,
2683				    GEN12_OAG_OACONTROL,
2684				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
2685				    50))
2686		drm_err(&stream->perf->i915->drm,
2687			"wait for OA to be disabled timed out\n");
2688
2689	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
2690	if (intel_wait_for_register(uncore,
2691				    GEN12_OA_TLB_INV_CR,
2692				    1, 0,
2693				    50))
2694		drm_err(&stream->perf->i915->drm,
2695			"wait for OA tlb invalidate timed out\n");
2696}
2697
2698/**
2699 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
2700 * @stream: An i915 perf stream opened for OA metrics
2701 *
2702 * Stops the OA unit from periodically writing counter reports into the
2703 * circular OA buffer. This also stops the hrtimer that periodically checks for
2704 * data in the circular OA buffer, for notifying userspace.
2705 */
2706static void i915_oa_stream_disable(struct i915_perf_stream *stream)
2707{
2708	stream->perf->ops.oa_disable(stream);
2709
2710	if (stream->periodic)
2711		hrtimer_cancel(&stream->poll_check_timer);
2712}
2713
2714static const struct i915_perf_stream_ops i915_oa_stream_ops = {
2715	.destroy = i915_oa_stream_destroy,
2716	.enable = i915_oa_stream_enable,
2717	.disable = i915_oa_stream_disable,
2718	.wait_unlocked = i915_oa_wait_unlocked,
2719	.poll_wait = i915_oa_poll_wait,
2720	.read = i915_oa_read,
2721};
2722
2723static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
2724{
2725	struct i915_active *active;
2726	int err;
2727
2728	active = i915_active_create();
2729	if (!active)
2730		return -ENOMEM;
2731
2732	err = stream->perf->ops.enable_metric_set(stream, active);
2733	if (err == 0)
2734		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);
2735
2736	i915_active_put(active);
2737	return err;
2738}
2739
2740static void
2741get_default_sseu_config(struct intel_sseu *out_sseu,
2742			struct intel_engine_cs *engine)
2743{
2744	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
2745
2746	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
2747
2748	if (IS_GEN(engine->i915, 11)) {
2749		/*
2750		 * We only need the subslice count, so it doesn't matter which
2751		 * ones we select - just keep the low bits, amounting to half of
2752		 * all the available subslices per slice.
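		 * For example, with 8 subslices in the mask: hweight8() == 8,
		 * so we keep the low 8 / 2 == 4 bits and subslice_mask
		 * becomes 0xf.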
2753		 */
2754		out_sseu->subslice_mask =
2755			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
2756		out_sseu->slice_mask = 0x1;
2757	}
2758}
2759
2760static int
2761get_sseu_config(struct intel_sseu *out_sseu,
2762		struct intel_engine_cs *engine,
2763		const struct drm_i915_gem_context_param_sseu *drm_sseu)
2764{
2765	if (drm_sseu->engine.engine_class != engine->uabi_class ||
2766	    drm_sseu->engine.engine_instance != engine->uabi_instance)
2767		return -EINVAL;
2768
2769	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
2770}
2771
2772/**
2773 * i915_oa_stream_init - validate combined props for OA stream and init
2774 * @stream: An i915 perf stream
2775 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2776 * @props: The property state that configures stream (individually validated)
2777 *
2778 * While read_properties_unlocked() validates properties in isolation it
2779 * doesn't ensure that the combination necessarily makes sense.
2780 *
2781 * At this point it has been determined that userspace wants a stream of
2782 * OA metrics, but still we need to further validate the combined
2783 * properties are OK.
2784 *
2785 * If the configuration makes sense then we can allocate memory for
2786 * a circular OA buffer and apply the requested metric set configuration.
2787 *
2788 * Returns: zero on success or a negative error code.
2789 */
2790static int i915_oa_stream_init(struct i915_perf_stream *stream,
2791			       struct drm_i915_perf_open_param *param,
2792			       struct perf_open_properties *props)
2793{
2794	struct drm_i915_private *i915 = stream->perf->i915;
2795	struct i915_perf *perf = stream->perf;
2796	int format_size;
2797	int ret;
2798
2799	if (!props->engine) {
2800		DRM_DEBUG("OA engine not specified\n");
2801		return -EINVAL;
2802	}
2803
2804	/*
2805	 * If the sysfs metrics/ directory wasn't registered for some
2806	 * reason then don't let userspace try their luck with config
2807	 * IDs
2808	 */
2809	if (!perf->metrics_kobj) {
2810		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
2811		return -EINVAL;
2812	}
2813
2814	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
2815	    (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) {
2816		DRM_DEBUG("Only OA report sampling supported\n");
2817		return -EINVAL;
2818	}
2819
2820	if (!perf->ops.enable_metric_set) {
2821		DRM_DEBUG("OA unit not supported\n");
2822		return -ENODEV;
2823	}
2824
2825	/*
2826	 * To avoid the complexity of having to accurately filter
2827	 * counter reports and marshal to the appropriate client
2828	 * we currently only allow exclusive access
2829	 */
2830	if (perf->exclusive_stream) {
2831		DRM_DEBUG("OA unit already in use\n");
2832		return -EBUSY;
2833	}
2834
2835	if (!props->oa_format) {
2836		DRM_DEBUG("OA report format not specified\n");
2837		return -EINVAL;
2838	}
2839
2840	stream->engine = props->engine;
2841	stream->uncore = stream->engine->gt->uncore;
2842
2843	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
2844
2845	format_size = perf->oa_formats[props->oa_format].size;
2846
2847	stream->sample_flags = props->sample_flags;
2848	stream->sample_size += format_size;
2849
2850	stream->oa_buffer.format_size = format_size;
2851	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
2852		return -EINVAL;
2853
2854	stream->hold_preemption = props->hold_preemption;
2855
2856	stream->oa_buffer.format =
2857		perf->oa_formats[props->oa_format].format;
2858
2859	stream->periodic = props->oa_periodic;
2860	if (stream->periodic)
2861		stream->period_exponent = props->oa_period_exponent;
2862
2863	if (stream->ctx) {
2864		ret = oa_get_render_ctx_id(stream);
2865		if (ret) {
2866			DRM_DEBUG("Invalid context id to filter with\n");
2867			return ret;
2868		}
2869	}
2870
2871	ret = alloc_noa_wait(stream);
2872	if (ret) {
2873		DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
2874		goto err_noa_wait_alloc;
2875	}
2876
2877	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
2878	if (!stream->oa_config) {
2879		DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
2880		ret = -EINVAL;
2881		goto err_config;
2882	}
2883
2884	/* PRM - observability performance counters:
2885	 *
2886	 *   OACONTROL, performance counter enable, note:
2887	 *
2888	 *   "When this bit is set, in order to have coherent counts,
2889	 *   RC6 power state and trunk clock gating must be disabled.
2890	 *   This can be achieved by programming MMIO registers as
2891	 *   0xA094=0 and 0xA090[31]=1"
2892	 *
2893	 *   In our case we are expecting that taking pm + FORCEWAKE
2894	 *   references will effectively disable RC6.
2895	 */
2896	intel_engine_pm_get(stream->engine);
2897	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
2898
2899	ret = alloc_oa_buffer(stream);
2900	if (ret)
2901		goto err_oa_buf_alloc;
2902
2903	stream->ops = &i915_oa_stream_ops;
2904
2905	perf->sseu = props->sseu;
2906	WRITE_ONCE(perf->exclusive_stream, stream);
2907
2908	ret = i915_perf_stream_enable_sync(stream);
2909	if (ret) {
2910		DRM_DEBUG("Unable to enable metric set\n");
2911		goto err_enable;
2912	}
2913
2914	DRM_DEBUG("opening stream oa config uuid=%s\n",
2915		  stream->oa_config->uuid);
2916
2917	hrtimer_init(&stream->poll_check_timer,
2918		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2919	stream->poll_check_timer.function = oa_poll_check_timer_cb;
2920	init_waitqueue_head(&stream->poll_wq);
2921	spin_lock_init(&stream->oa_buffer.ptr_lock);
2922
2923	return 0;
2924
2925err_enable:
2926	WRITE_ONCE(perf->exclusive_stream, NULL);
2927	perf->ops.disable_metric_set(stream);
2928
2929	free_oa_buffer(stream);
2930
2931err_oa_buf_alloc:
2932	free_oa_configs(stream);
2933
2934	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
2935	intel_engine_pm_put(stream->engine);
2936
2937err_config:
2938	free_noa_wait(stream);
2939
2940err_noa_wait_alloc:
2941	if (stream->ctx)
2942		oa_put_render_ctx_id(stream);
2943
2944	return ret;
2945}
2946
2947void i915_oa_init_reg_state(const struct intel_context *ce,
2948			    const struct intel_engine_cs *engine)
2949{
2950	struct i915_perf_stream *stream;
2951
2952	if (engine->class != RENDER_CLASS)
2953		return;
2954
2955	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
2956	stream = READ_ONCE(engine->i915->perf.exclusive_stream);
2957	if (stream && INTEL_GEN(stream->perf->i915) < 12)
2958		gen8_update_reg_state_unlocked(ce, stream);
2959}
2960
2961/**
2962 * i915_perf_read - handles read() FOP for i915 perf stream FDs
2963 * @file: An i915 perf stream file
2964 * @buf: destination buffer given by userspace
2965 * @count: the number of bytes userspace wants to read
2966 * @ppos: (inout) file seek position (unused)
2967 *
2968 * The entry point for handling a read() on a stream file descriptor from
2969 * userspace. Most of the work is left to the i915_perf_read_locked() and
2970 * &i915_perf_stream_ops->read but to save having stream implementations (of
2971 * which we might have multiple later) we handle blocking read here.
2972 *
2973 * We can also consistently treat trying to read from a disabled stream
2974 * as an IO error so implementations can assume the stream is enabled
2975 * while reading.
2976 *
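 * A minimal userspace reader sketch (illustrative only; error handling
 * and the hypothetical process_sample() helper are elided). Records are
 * framed by struct drm_i915_perf_record_header, whose size field includes
 * the header itself:
 *
 *	char buf[64 * 1024];
 *	ssize_t n = read(stream_fd, buf, sizeof(buf));
 *	size_t offset = 0;
 *
 *	while (n > 0 &&
 *	       offset + sizeof(struct drm_i915_perf_record_header) <= (size_t)n) {
 *		struct drm_i915_perf_record_header *header =
 *			(void *)(buf + offset);
 *
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_sample(header + 1,
 *				       header->size - sizeof(*header));
 *		offset += header->size;
 *	}
 *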
2977 * Returns: The number of bytes copied or a negative error code on failure.
2978 */
2979static ssize_t i915_perf_read(struct file *file,
2980			      char __user *buf,
2981			      size_t count,
2982			      loff_t *ppos)
2983{
2984	struct i915_perf_stream *stream = file->private_data;
2985	struct i915_perf *perf = stream->perf;
2986	size_t offset = 0;
2987	int ret;
2988
2989	/* To ensure it's handled consistently we simply treat all reads of a
2990	 * disabled stream as an error. In particular it might otherwise lead
2991	 * to a deadlock for blocking file descriptors...
2992	 */
2993	if (!stream->enabled)
2994		return -EIO;
2995
2996	if (!(file->f_flags & O_NONBLOCK)) {
2997		/* There's the small chance of false positives from
2998		 * stream->ops->wait_unlocked.
2999		 *
3000		 * E.g. with single context filtering, since we only wait until
3001		 * the OA buffer has >= 1 report, we don't immediately know whether
3002		 * any reports really belong to the current context.
3003		 */
3004		do {
3005			ret = stream->ops->wait_unlocked(stream);
3006			if (ret)
3007				return ret;
3008
3009			mutex_lock(&perf->lock);
3010			ret = stream->ops->read(stream, buf, count, &offset);
3011			mutex_unlock(&perf->lock);
3012		} while (!offset && !ret);
3013	} else {
3014		mutex_lock(&perf->lock);
3015		ret = stream->ops->read(stream, buf, count, &offset);
3016		mutex_unlock(&perf->lock);
3017	}
3018
3019	/* We allow the poll checking to sometimes report false positive EPOLLIN
3020	 * events where we might actually report EAGAIN on read() if there's
3021	 * not really any data available. In this situation though we don't
3022	 * want to enter a busy loop between poll() reporting a EPOLLIN event
3023	 * and read() returning -EAGAIN. Clearing the stream->pollin state here
3024	 * effectively ensures we back off until the next hrtimer callback
3025	 * before reporting another EPOLLIN event.
3026	 * The exception to this is if ops->read() returned -ENOSPC which means
3027	 * that more OA data is available than could fit in the user provided
3028	 * buffer. In this case we want the next poll() call to not block.
3029	 */
3030	if (ret != -ENOSPC)
3031		stream->pollin = false;
3032
3033	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
3034	return offset ?: (ret ?: -EAGAIN);
3035}
3036
3037static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3038{
3039	struct i915_perf_stream *stream =
3040		container_of(hrtimer, typeof(*stream), poll_check_timer);
3041
3042	if (oa_buffer_check_unlocked(stream)) {
3043		stream->pollin = true;
3044		wake_up(&stream->poll_wq);
3045	}
3046
3047	hrtimer_forward_now(hrtimer,
3048			    ns_to_ktime(stream->poll_oa_period));
3049
3050	return HRTIMER_RESTART;
3051}
3052
3053/**
3054 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3055 * @stream: An i915 perf stream
3056 * @file: An i915 perf stream file
3057 * @wait: poll() state table
3058 *
3059 * For handling userspace polling on an i915 perf stream, this calls through to
3060 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3061 * will be woken for new stream data.
3062 *
3063 * Note: The &perf->lock mutex has been taken to serialize
3064 * with any non-file-operation driver hooks.
3065 *
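 * A userspace sketch (illustrative only; read_records() is a hypothetical
 * helper):
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read_records(stream_fd);
 *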
3066 * Returns: any poll events that are ready without sleeping
3067 */
3068static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3069				      struct file *file,
3070				      poll_table *wait)
3071{
3072	__poll_t events = 0;
3073
3074	stream->ops->poll_wait(stream, file, wait);
3075
3076	/* Note: we don't explicitly check whether there's something to read
3077	 * here since this path may be very hot depending on what else
3078	 * userspace is polling, or on the timeout in use. We rely solely on
3079	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3080	 * samples to read.
3081	 */
3082	if (stream->pollin)
3083		events |= EPOLLIN;
3084
3085	return events;
3086}
3087
3088/**
3089 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3090 * @file: An i915 perf stream file
3091 * @wait: poll() state table
3092 *
3093 * For handling userspace polling on an i915 perf stream, this ensures
3094 * poll_wait() gets called with a wait queue that will be woken for new stream
3095 * data.
3096 *
3097 * Note: Implementation deferred to i915_perf_poll_locked()
3098 *
3099 * Returns: any poll events that are ready without sleeping
3100 */
3101static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3102{
3103	struct i915_perf_stream *stream = file->private_data;
3104	struct i915_perf *perf = stream->perf;
3105	__poll_t ret;
3106
3107	mutex_lock(&perf->lock);
3108	ret = i915_perf_poll_locked(stream, file, wait);
3109	mutex_unlock(&perf->lock);
3110
3111	return ret;
3112}
3113
3114/**
3115 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3116 * @stream: A disabled i915 perf stream
3117 *
3118 * [Re]enables the associated capture of data for this stream.
3119 *
3120 * If a stream was previously enabled then there's currently no intention
3121 * to provide userspace any guarantee about the preservation of previously
3122 * buffered data.
3123 */
3124static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3125{
3126	if (stream->enabled)
3127		return;
3128
3129	/* Allow stream->ops->enable() to refer to this */
3130	stream->enabled = true;
3131
3132	if (stream->ops->enable)
3133		stream->ops->enable(stream);
3134
3135	if (stream->hold_preemption)
3136		intel_context_set_nopreempt(stream->pinned_ctx);
3137}
3138
3139/**
3140 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3141 * @stream: An enabled i915 perf stream
3142 *
3143 * Disables the associated capture of data for this stream.
3144 *
3145 * The intention is that disabling and re-enabling a stream will ideally be
3146 * cheaper than destroying and re-opening a stream with the same configuration,
3147 * though there are no formal guarantees about what state or buffered data
3148 * must be retained between disabling and re-enabling a stream.
3149 *
3150 * Note: while a stream is disabled it's considered an error for userspace
3151 * to attempt to read from the stream (-EIO).
3152 */
3153static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3154{
3155	if (!stream->enabled)
3156		return;
3157
3158	/* Allow stream->ops->disable() to refer to this */
3159	stream->enabled = false;
3160
3161	if (stream->hold_preemption)
3162		intel_context_clear_nopreempt(stream->pinned_ctx);
3163
3164	if (stream->ops->disable)
3165		stream->ops->disable(stream);
3166}
3167
3168static long i915_perf_config_locked(struct i915_perf_stream *stream,
3169				    unsigned long metrics_set)
3170{
3171	struct i915_oa_config *config;
3172	long ret = stream->oa_config->id;
3173
3174	config = i915_perf_get_oa_config(stream->perf, metrics_set);
3175	if (!config)
3176		return -EINVAL;
3177
3178	if (config != stream->oa_config) {
3179		int err;
3180
3181		/*
3182		 * If OA is bound to a specific context, emit the
3183		 * reconfiguration inline from that context. The update
3184		 * will then be ordered with respect to submission on that
3185		 * context.
3186		 *
3187		 * When set globally, we use a low priority kernel context,
3188		 * so it will effectively take effect when idle.
3189		 */
3190		err = emit_oa_config(stream, config, oa_context(stream), NULL);
3191		if (!err)
3192			config = xchg(&stream->oa_config, config);
3193		else
3194			ret = err;
3195	}
3196
3197	i915_oa_config_put(config);
3198
3199	return ret;
3200}
3201
3202/**
3203 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3204 * @stream: An i915 perf stream
3205 * @cmd: the ioctl request
3206 * @arg: the ioctl data
3207 *
3208 * Note: The &perf->lock mutex has been taken to serialize
3209 * with any non-file-operation driver hooks.
3210 *
3211 * Returns: zero on success or a negative error code. Returns -EINVAL for
3212 * an unknown ioctl request.
3213 */
3214static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3215				   unsigned int cmd,
3216				   unsigned long arg)
3217{
3218	switch (cmd) {
3219	case I915_PERF_IOCTL_ENABLE:
3220		i915_perf_enable_locked(stream);
3221		return 0;
3222	case I915_PERF_IOCTL_DISABLE:
3223		i915_perf_disable_locked(stream);
3224		return 0;
3225	case I915_PERF_IOCTL_CONFIG:
3226		return i915_perf_config_locked(stream, arg);
3227	}
3228
3229	return -EINVAL;
3230}
3231
3232/**
3233 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3234 * @file: An i915 perf stream file
3235 * @cmd: the ioctl request
3236 * @arg: the ioctl data
3237 *
3238 * Implementation deferred to i915_perf_ioctl_locked().
3239 *
3240 * Returns: zero on success or a negative error code. Returns -EINVAL for
3241 * an unknown ioctl request.
3242 */
3243static long i915_perf_ioctl(struct file *file,
3244			    unsigned int cmd,
3245			    unsigned long arg)
3246{
3247	struct i915_perf_stream *stream = file->private_data;
3248	struct i915_perf *perf = stream->perf;
3249	long ret;
3250
3251	mutex_lock(&perf->lock);
3252	ret = i915_perf_ioctl_locked(stream, cmd, arg);
3253	mutex_unlock(&perf->lock);
3254
3255	return ret;
3256}
3257
3258/**
3259 * i915_perf_destroy_locked - destroy an i915 perf stream
3260 * @stream: An i915 perf stream
3261 *
3262 * Frees all resources associated with the given i915 perf @stream, disabling
3263 * any associated data capture in the process.
3264 *
3265 * Note: The &perf->lock mutex has been taken to serialize
3266 * with any non-file-operation driver hooks.
3267 */
3268static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3269{
3270	if (stream->enabled)
3271		i915_perf_disable_locked(stream);
3272
3273	if (stream->ops->destroy)
3274		stream->ops->destroy(stream);
3275
3276	if (stream->ctx)
3277		i915_gem_context_put(stream->ctx);
3278
3279	kfree(stream);
3280}
3281
3282/**
3283 * i915_perf_release - handles userspace close() of a stream file
3284 * @inode: anonymous inode associated with file
3285 * @file: An i915 perf stream file
3286 *
3287 * Cleans up any resources associated with an open i915 perf stream file.
3288 *
3289 * NB: close() can't really fail from the userspace point of view.
3290 *
3291 * Returns: zero on success or a negative error code.
3292 */
3293static int i915_perf_release(struct inode *inode, struct file *file)
3294{
3295	struct i915_perf_stream *stream = file->private_data;
3296	struct i915_perf *perf = stream->perf;
3297
3298	mutex_lock(&perf->lock);
3299	i915_perf_destroy_locked(stream);
3300	mutex_unlock(&perf->lock);
3301
3302	/* Release the reference the perf stream kept on the driver. */
3303	drm_dev_put(&perf->i915->drm);
3304
3305	return 0;
3306}
3307
3308
3309static const struct file_operations fops = {
3310	.owner		= THIS_MODULE,
3311	.llseek		= no_llseek,
3312	.release	= i915_perf_release,
3313	.poll		= i915_perf_poll,
3314	.read		= i915_perf_read,
3315	.unlocked_ioctl	= i915_perf_ioctl,
3316	/* Our ioctls have no pointer arguments, so it's safe to use the same
3317	 * function to handle 32-bit compatibility.
3318	 */
3319	.compat_ioctl   = i915_perf_ioctl,
3320};
3321
3322
3323/**
3324 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3325 * @perf: i915 perf instance
3326 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3327 * @props: individually validated u64 property value pairs
3328 * @file: drm file
3329 *
3330 * See i915_perf_open_ioctl() for interface details.
3331 *
3332 * Implements further stream config validation and stream initialization on
3333 * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
3334 * taken to serialize with any non-file-operation driver hooks.
3335 *
3336 * Note: at this point the @props have only been validated in isolation and
3337 * it's still necessary to validate that the combination of properties makes
3338 * sense.
3339 *
3340 * In the case where userspace is interested in OA unit metrics then further
3341 * config validation and stream initialization details will be handled by
3342 * i915_oa_stream_init(). The code here should only validate config state that
3343 * will be relevant to all stream types / backends.
3344 *
3345 * Returns: zero on success or a negative error code.
3346 */
3347static int
3348i915_perf_open_ioctl_locked(struct i915_perf *perf,
3349			    struct drm_i915_perf_open_param *param,
3350			    struct perf_open_properties *props,
3351			    struct drm_file *file)
3352{
3353	struct i915_gem_context *specific_ctx = NULL;
3354	struct i915_perf_stream *stream = NULL;
3355	unsigned long f_flags = 0;
3356	bool privileged_op = true;
3357	int stream_fd;
3358	int ret;
3359
3360	if (props->single_context) {
3361		u32 ctx_handle = props->ctx_handle;
3362		struct drm_i915_file_private *file_priv = file->driver_priv;
3363
3364		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3365		if (!specific_ctx) {
3366			DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
3367				  ctx_handle);
3368			ret = -ENOENT;
3369			goto err;
3370		}
3371	}
3372
3373	/*
3374	 * On Haswell the OA unit supports clock gating off for a specific
3375	 * context and in this mode there's no visibility of metrics for the
3376	 * rest of the system, which we consider acceptable for a
3377	 * non-privileged client.
3378	 *
3379	 * For Gen8->11 the OA unit no longer supports clock gating off for a
3380	 * specific context and the kernel can't securely stop the counters
3381	 * from updating as system-wide / global values. Even though we can
3382	 * filter reports based on the included context ID we can't block
3383	 * clients from seeing the raw / global counter values via
3384	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3385	 * enable the OA unit by default.
3386	 *
3387	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3388	 * per context basis. So we can relax requirements there if the user
3389	 * doesn't request global stream access (i.e. query based sampling
3390	 * using MI_REPORT_PERF_COUNT).
3391	 */
3392	if (IS_HASWELL(perf->i915) && specific_ctx)
3393		privileged_op = false;
3394	else if (IS_GEN(perf->i915, 12) && specific_ctx &&
3395		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3396		privileged_op = false;
3397
3398	if (props->hold_preemption) {
3399		if (!props->single_context) {
3400			DRM_DEBUG("preemption disable with no context\n");
3401			ret = -EINVAL;
3402			goto err;
3403		}
3404		privileged_op = true;
3405	}
3406
3407	/*
3408	 * Asking for SSEU configuration is a privileged operation.
3409	 */
3410	if (props->has_sseu)
3411		privileged_op = true;
3412	else
3413		get_default_sseu_config(&props->sseu, props->engine);
3414
3415	/* Similar to perf's kernel.perf_event_paranoid sysctl option
3416	 * we check a dev.i915.perf_stream_paranoid sysctl option
3417	 * to determine if it's ok to access system wide OA counters
3418	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3419	 */
3420	if (privileged_op &&
3421	    i915_perf_stream_paranoid && !perfmon_capable()) {
3422		DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
3423		ret = -EACCES;
3424		goto err_ctx;
3425	}
3426
3427	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3428	if (!stream) {
3429		ret = -ENOMEM;
3430		goto err_ctx;
3431	}
3432
3433	stream->perf = perf;
3434	stream->ctx = specific_ctx;
3435	stream->poll_oa_period = props->poll_oa_period;
3436
3437	ret = i915_oa_stream_init(stream, param, props);
3438	if (ret)
3439		goto err_alloc;
3440
3441	/* We avoid simply assigning stream->sample_flags = props->sample_flags
3442	 * so that _stream_init can check the combination of sample flags more
3443	 * thoroughly; still, this is the expected result at this point.
3444	 */
3445	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3446		ret = -ENODEV;
3447		goto err_flags;
3448	}
3449
3450	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3451		f_flags |= O_CLOEXEC;
3452	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3453		f_flags |= O_NONBLOCK;
3454
3455	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3456	if (stream_fd < 0) {
3457		ret = stream_fd;
3458		goto err_flags;
3459	}
3460
3461	if (!(param->flags & I915_PERF_FLAG_DISABLED))
3462		i915_perf_enable_locked(stream);
3463
3464	/* Take a reference on the driver that will be kept with stream_fd
3465	 * until its release.
3466	 */
3467	drm_dev_get(&perf->i915->drm);
3468
3469	return stream_fd;
3470
3471err_flags:
3472	if (stream->ops->destroy)
3473		stream->ops->destroy(stream);
3474err_alloc:
3475	kfree(stream);
3476err_ctx:
3477	if (specific_ctx)
3478		i915_gem_context_put(specific_ctx);
3479err:
3480	return ret;
3481}
3482
3483static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3484{
3485	return i915_cs_timestamp_ticks_to_ns(perf->i915, 2ULL << exponent);
3486}
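
/*
 * The helper above maps an exponent to a period of 2^(exponent + 1)
 * command streamer timestamp ticks. A hedged sketch of the inverse mapping
 * a userspace tool might use, picking the smallest exponent whose period
 * is at least the requested one (the 19.2MHz timestamp frequency is an
 * illustrative assumption, not queried from the kernel):
 *
 *	static int oa_exponent_for_period_ns(uint64_t target_ns)
 *	{
 *		const uint64_t ts_hz = 19200000;	// assumed CS timestamp frequency
 *		int exponent;
 *
 *		for (exponent = 0; exponent <= 31; exponent++) {
 *			uint64_t period_ns =
 *				(2ull << exponent) * 1000000000ull / ts_hz;
 *			if (period_ns >= target_ns)
 *				return exponent;
 *		}
 *		return -1;
 *	}
 */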
3487
3488/**
3489 * read_properties_unlocked - validate + copy userspace stream open properties
3490 * @perf: i915 perf instance
3491 * @uprops: The array of u64 key value pairs given by userspace
3492 * @n_props: The number of key value pairs expected in @uprops
3493 * @props: The stream configuration built up while validating properties
3494 *
3495 * Note this function only validates properties in isolation; it doesn't
3496 * validate that the combination of properties makes sense or that all
3497 * properties necessary for a particular kind of stream have been set.
3498 *
3499 * Note that there currently aren't any ordering requirements for properties so
3500 * we shouldn't validate or assume anything about ordering here. This doesn't
3501 * rule out defining new properties with ordering requirements in the future.
3502 */
3503static int read_properties_unlocked(struct i915_perf *perf,
3504				    u64 __user *uprops,
3505				    u32 n_props,
3506				    struct perf_open_properties *props)
3507{
3508	u64 __user *uprop = uprops;
3509	u32 i;
3510	int ret;
3511
3512	memset(props, 0, sizeof(struct perf_open_properties));
3513	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
3514
3515	if (!n_props) {
3516		DRM_DEBUG("No i915 perf properties given\n");
3517		return -EINVAL;
3518	}
3519
3520	/* At the moment we only support using i915-perf on the RCS. */
3521	props->engine = intel_engine_lookup_user(perf->i915,
3522						 I915_ENGINE_CLASS_RENDER,
3523						 0);
3524	if (!props->engine) {
3525		DRM_DEBUG("No RENDER-capable engines\n");
3526		return -EINVAL;
3527	}
3528
3529	/* Considering that ID = 0 is reserved and assuming that we don't
3530	 * (currently) expect any configurations to ever specify duplicate
3531	 * values for a particular property ID, the last _PROP_MAX value is
3532	 * one greater than the maximum number of properties we expect to get
3533	 * from userspace.
3534	 */
3535	if (n_props >= DRM_I915_PERF_PROP_MAX) {
3536		DRM_DEBUG("More i915 perf properties specified than exist\n");
3537		return -EINVAL;
3538	}
3539
3540	for (i = 0; i < n_props; i++) {
3541		u64 oa_period, oa_freq_hz;
3542		u64 id, value;
3543
3544		ret = get_user(id, uprop);
3545		if (ret)
3546			return ret;
3547
3548		ret = get_user(value, uprop + 1);
3549		if (ret)
3550			return ret;
3551
3552		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
3553			DRM_DEBUG("Unknown i915 perf property ID\n");
3554			return -EINVAL;
3555		}
3556
3557		switch ((enum drm_i915_perf_property_id)id) {
3558		case DRM_I915_PERF_PROP_CTX_HANDLE:
3559			props->single_context = 1;
3560			props->ctx_handle = value;
3561			break;
3562		case DRM_I915_PERF_PROP_SAMPLE_OA:
3563			if (value)
3564				props->sample_flags |= SAMPLE_OA_REPORT;
3565			break;
3566		case DRM_I915_PERF_PROP_OA_METRICS_SET:
3567			if (value == 0) {
3568				DRM_DEBUG("Unknown OA metric set ID\n");
3569				return -EINVAL;
3570			}
3571			props->metrics_set = value;
3572			break;
3573		case DRM_I915_PERF_PROP_OA_FORMAT:
3574			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
3575				DRM_DEBUG("Out-of-range OA report format %llu\n",
3576					  value);
3577				return -EINVAL;
3578			}
3579			if (!perf->oa_formats[value].size) {
3580				DRM_DEBUG("Unsupported OA report format %llu\n",
3581					  value);
3582				return -EINVAL;
3583			}
3584			props->oa_format = value;
3585			break;
3586		case DRM_I915_PERF_PROP_OA_EXPONENT:
3587			if (value > OA_EXPONENT_MAX) {
3588				DRM_DEBUG("OA timer exponent too high (> %u)\n",
3589					 OA_EXPONENT_MAX);
3590				return -EINVAL;
3591			}
3592
3593			/* Theoretically we can program the OA unit to sample
3594			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
3595			 * for BXT. We don't allow such high sampling
3596			 * frequencies by default without CAP_PERFMON or CAP_SYS_ADMIN.
3597			 */
3598
3599			BUILD_BUG_ON(sizeof(oa_period) != 8);
3600			oa_period = oa_exponent_to_ns(perf, value);
3601
3602			/* This check is primarily to ensure that oa_period <=
3603			 * UINT32_MAX (before passing to do_div which only
3604			 * accepts a u32 denominator), but we can also skip
3605			 * checking anything < 1Hz which implicitly can't be
3606			 * limited via an integer oa_max_sample_rate.
3607			 */
3608			if (oa_period <= NSEC_PER_SEC) {
3609				u64 tmp = NSEC_PER_SEC;
3610				do_div(tmp, oa_period);
3611				oa_freq_hz = tmp;
3612			} else
3613				oa_freq_hz = 0;
3614
3615			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
3616				DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
3617					  i915_oa_max_sample_rate);
3618				return -EACCES;
3619			}
3620
3621			props->oa_periodic = true;
3622			props->oa_period_exponent = value;
3623			break;
3624		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
3625			props->hold_preemption = !!value;
3626			break;
3627		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
3628			struct drm_i915_gem_context_param_sseu user_sseu;
3629
3630			if (copy_from_user(&user_sseu,
3631					   u64_to_user_ptr(value),
3632					   sizeof(user_sseu))) {
3633				DRM_DEBUG("Unable to copy global sseu parameter\n");
3634				return -EFAULT;
3635			}
3636
3637			ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
3638			if (ret) {
3639				DRM_DEBUG("Invalid SSEU configuration\n");
3640				return ret;
3641			}
3642			props->has_sseu = true;
3643			break;
3644		}
3645		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
3646			if (value < 100000 /* 100us */) {
3647				DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
3648					  value);
3649				return -EINVAL;
3650			}
3651			props->poll_oa_period = value;
3652			break;
3653		case DRM_I915_PERF_PROP_MAX:
3654			MISSING_CASE(id);
3655			return -EINVAL;
3656		}
3657
3658		uprop += 2;
3659	}
3660
3661	return 0;
3662}
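
/*
 * A hedged sketch of the flat (id, value) u64 pair layout this function
 * expects from userspace; metrics_set_id is assumed to have been read from
 * the sysfs metrics/ directory, and the format/exponent values are only
 * illustrative:
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 */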
3663
3664/**
3665 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
3666 * @dev: drm device
3667 * @data: ioctl data copied from userspace (unvalidated)
3668 * @file: drm file
3669 *
3670 * Validates the stream open parameters given by userspace including flags
3671 * and an array of u64 key, value pair properties.
3672 *
3673 * Very little is assumed up front about the nature of the stream being
3674 * opened (for instance we don't assume it's for periodic OA unit metrics). An
3675 * i915-perf stream is expected to be a suitable interface for other forms of
3676 * buffered data written by the GPU besides periodic OA metrics.
3677 *
3678 * Note we copy the properties from userspace outside of the i915 perf
3679 * mutex to avoid an awkward lockdep with mmap_lock.
3680 *
3681 * Most of the implementation details are handled by
3682 * i915_perf_open_ioctl_locked() after taking the &perf->lock
3683 * mutex for serializing with any non-file-operation driver hooks.
3684 *
3685 * Return: A newly opened i915 Perf stream file descriptor or negative
3686 * error code on failure.
3687 */
3688int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3689			 struct drm_file *file)
3690{
3691	struct i915_perf *perf = &to_i915(dev)->perf;
3692	struct drm_i915_perf_open_param *param = data;
3693	struct perf_open_properties props;
3694	u32 known_open_flags;
3695	int ret;
3696
3697	if (!perf->i915) {
3698		DRM_DEBUG("i915 perf interface not available for this system\n");
3699		return -ENOTSUPP;
3700	}
3701
3702	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
3703			   I915_PERF_FLAG_FD_NONBLOCK |
3704			   I915_PERF_FLAG_DISABLED;
3705	if (param->flags & ~known_open_flags) {
3706		DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
3707		return -EINVAL;
3708	}
3709
3710	ret = read_properties_unlocked(perf,
3711				       u64_to_user_ptr(param->properties_ptr),
3712				       param->num_properties,
3713				       &props);
3714	if (ret)
3715		return ret;
3716
3717	mutex_lock(&perf->lock);
3718	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
3719	mutex_unlock(&perf->lock);
3720
3721	return ret;
3722}
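
/*
 * Continuing the props[] sketch above, a hedged example of wrapping the
 * property pairs in a drm_i915_perf_open_param and opening a stream
 * (drm_fd is assumed to be an open i915 DRM file descriptor):
 *
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_FD_NONBLOCK,
 *		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */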
3723
3724/**
3725 * i915_perf_register - exposes i915-perf to userspace
3726 * @i915: i915 device instance
3727 *
3728 * In particular OA metric sets are advertised under a sysfs metrics/
3729 * directory allowing userspace to enumerate valid IDs that can be
3730 * used to open an i915-perf stream.
3731 */
3732void i915_perf_register(struct drm_i915_private *i915)
3733{
3734	struct i915_perf *perf = &i915->perf;
3735
3736	if (!perf->i915)
3737		return;
3738
3739	/* Serialize against any attempted i915_perf_open_ioctl(),
3740	 * considering that we register after the interface is already
3741	 * exposed to userspace.
3742	 */
3743	mutex_lock(&perf->lock);
3744
3745	perf->metrics_kobj =
3746		kobject_create_and_add("metrics",
3747				       &i915->drm.primary->kdev->kobj);
3748
3749	mutex_unlock(&perf->lock);
3750}
3751
3752/**
3753 * i915_perf_unregister - hide i915-perf from userspace
3754 * @i915: i915 device instance
3755 *
3756 * i915-perf state cleanup is split up into an 'unregister' and
3757 * 'deinit' phase where the interface is first hidden from
3758 * userspace by i915_perf_unregister() before cleaning up
3759 * remaining state in i915_perf_fini().
3760 */
3761void i915_perf_unregister(struct drm_i915_private *i915)
3762{
3763	struct i915_perf *perf = &i915->perf;
3764
3765	if (!perf->metrics_kobj)
3766		return;
3767
3768	kobject_put(perf->metrics_kobj);
3769	perf->metrics_kobj = NULL;
3770}
3771
3772static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
3773{
3774	static const i915_reg_t flex_eu_regs[] = {
3775		EU_PERF_CNTL0,
3776		EU_PERF_CNTL1,
3777		EU_PERF_CNTL2,
3778		EU_PERF_CNTL3,
3779		EU_PERF_CNTL4,
3780		EU_PERF_CNTL5,
3781		EU_PERF_CNTL6,
3782	};
3783	int i;
3784
3785	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
3786		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
3787			return true;
3788	}
3789	return false;
3790}
3791
3792#define ADDR_IN_RANGE(addr, start, end) \
3793	((addr) >= (start) && \
3794	 (addr) <= (end))
3795
3796#define REG_IN_RANGE(addr, start, end) \
3797	((addr) >= i915_mmio_reg_offset(start) && \
3798	 (addr) <= i915_mmio_reg_offset(end))
3799
3800#define REG_EQUAL(addr, mmio) \
3801	((addr) == i915_mmio_reg_offset(mmio))
3802
3803static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3804{
3805	return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
3806	       REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
3807	       REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
3808}
3809
3810static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3811{
3812	return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
3813	       REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
3814	       REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
3815	       REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
3816}
3817
3818static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3819{
3820	return gen7_is_valid_mux_addr(perf, addr) ||
3821	       REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
3822	       REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
3823}
3824
3825static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3826{
3827	return gen8_is_valid_mux_addr(perf, addr) ||
3828	       REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
3829	       REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
3830}
3831
3832static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3833{
3834	return gen7_is_valid_mux_addr(perf, addr) ||
3835	       ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
3836	       REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
3837	       REG_EQUAL(addr, HSW_MBVID2_MISR0);
3838}
3839
3840static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3841{
3842	return gen7_is_valid_mux_addr(perf, addr) ||
3843	       ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
3844}
3845
3846static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3847{
3848	return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
3849	       REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
3850	       REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
3851	       REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
3852	       REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
3853	       REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
3854	       REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
3855}
3856
3857static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3858{
3859	return REG_EQUAL(addr, NOA_WRITE) ||
3860	       REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
3861	       REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
3862	       REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
3863	       REG_EQUAL(addr, RPM_CONFIG0) ||
3864	       REG_EQUAL(addr, RPM_CONFIG1) ||
3865	       REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
3866}
3867
3868static u32 mask_reg_value(u32 reg, u32 val)
3869{
3870	/* HALF_SLICE_CHICKEN2 is programmed with the
3871	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
3872	 * programmed by userspace doesn't change this.
3873	 */
3874	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
3875		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
3876
3877	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
3878	 * indicated by its name and a bunch of selection fields used by OA
3879	 * configs.
3880	 */
3881	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
3882		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
3883
3884	return val;
3885}
3886
3887static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
3888					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
3889					 u32 __user *regs,
3890					 u32 n_regs)
3891{
3892	struct i915_oa_reg *oa_regs;
3893	int err;
3894	u32 i;
3895
3896	if (!n_regs)
3897		return NULL;
3898
3899	/* No is_valid function means we're not allowing any register to be programmed. */
3900	GEM_BUG_ON(!is_valid);
3901	if (!is_valid)
3902		return ERR_PTR(-EINVAL);
3903
3904	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
3905	if (!oa_regs)
3906		return ERR_PTR(-ENOMEM);
3907
3908	for (i = 0; i < n_regs; i++) {
3909		u32 addr, value;
3910
3911		err = get_user(addr, regs);
3912		if (err)
3913			goto addr_err;
3914
3915		if (!is_valid(perf, addr)) {
3916			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
3917			err = -EINVAL;
3918			goto addr_err;
3919		}
3920
3921		err = get_user(value, regs + 1);
3922		if (err)
3923			goto addr_err;
3924
3925		oa_regs[i].addr = _MMIO(addr);
3926		oa_regs[i].value = mask_reg_value(addr, value);
3927
3928		regs += 2;
3929	}
3930
3931	return oa_regs;
3932
3933addr_err:
3934	kfree(oa_regs);
3935	return ERR_PTR(err);
3936}
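
/*
 * A hedged sketch of the layout alloc_oa_regs() consumes: a flat userspace
 * array of (mmio address, value) u32 pairs. The addresses and values below
 * are placeholders for illustration only, not a working NOA programming
 * sequence:
 *
 *	uint32_t mux_regs[] = {
 *		0x9888, 0x198b0000,	// hypothetical mux write
 *		0x9888, 0x078b0066,	// hypothetical mux write
 *	};
 */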
3937
3938static ssize_t show_dynamic_id(struct device *dev,
3939			       struct device_attribute *attr,
3940			       char *buf)
3941{
3942	struct i915_oa_config *oa_config =
3943		container_of(attr, typeof(*oa_config), sysfs_metric_id);
3944
3945	return sprintf(buf, "%d\n", oa_config->id);
3946}
3947
3948static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
3949					 struct i915_oa_config *oa_config)
3950{
3951	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
3952	oa_config->sysfs_metric_id.attr.name = "id";
3953	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
3954	oa_config->sysfs_metric_id.show = show_dynamic_id;
3955	oa_config->sysfs_metric_id.store = NULL;
3956
3957	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
3958	oa_config->attrs[1] = NULL;
3959
3960	oa_config->sysfs_metric.name = oa_config->uuid;
3961	oa_config->sysfs_metric.attrs = oa_config->attrs;
3962
3963	return sysfs_create_group(perf->metrics_kobj,
3964				  &oa_config->sysfs_metric);
3965}
3966
3967/**
3968 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
3969 * @dev: drm device
3970 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
3971 *        userspace (unvalidated)
3972 * @file: drm file
3973 *
3974 * Validates the submitted OA registers to be saved into a new OA config that
3975 * can then be used for programming the OA unit and its NOA network.
3976 *
3977 * Returns: A new allocated config number to be used with the perf open ioctl
3978 * or a negative error code on failure.
3979 */
3980int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
3981			       struct drm_file *file)
3982{
3983	struct i915_perf *perf = &to_i915(dev)->perf;
3984	struct drm_i915_perf_oa_config *args = data;
3985	struct i915_oa_config *oa_config, *tmp;
3986	struct i915_oa_reg *regs;
3987	int err, id;
3988
3989	if (!perf->i915) {
3990		DRM_DEBUG("i915 perf interface not available for this system\n");
3991		return -ENOTSUPP;
3992	}
3993
3994	if (!perf->metrics_kobj) {
3995		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
3996		return -EINVAL;
3997	}
3998
3999	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4000		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
4001		return -EACCES;
4002	}
4003
4004	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4005	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4006	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
4007		DRM_DEBUG("No OA registers given\n");
4008		return -EINVAL;
4009	}
4010
4011	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4012	if (!oa_config) {
4013		DRM_DEBUG("Failed to allocate memory for the OA config\n");
4014		return -ENOMEM;
4015	}
4016
4017	oa_config->perf = perf;
4018	kref_init(&oa_config->ref);
4019
4020	if (!uuid_is_valid(args->uuid)) {
4021		DRM_DEBUG("Invalid uuid format for OA config\n");
4022		err = -EINVAL;
4023		goto reg_err;
4024	}
4025
4026	/* Last character in oa_config->uuid will be 0 because oa_config was
4027	 * allocated with kzalloc().
4028	 */
4029	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4030
4031	oa_config->mux_regs_len = args->n_mux_regs;
4032	regs = alloc_oa_regs(perf,
4033			     perf->ops.is_valid_mux_reg,
4034			     u64_to_user_ptr(args->mux_regs_ptr),
4035			     args->n_mux_regs);
4036
4037	if (IS_ERR(regs)) {
4038		DRM_DEBUG("Failed to create OA config for mux_regs\n");
4039		err = PTR_ERR(regs);
4040		goto reg_err;
4041	}
4042	oa_config->mux_regs = regs;
4043
4044	oa_config->b_counter_regs_len = args->n_boolean_regs;
4045	regs = alloc_oa_regs(perf,
4046			     perf->ops.is_valid_b_counter_reg,
4047			     u64_to_user_ptr(args->boolean_regs_ptr),
4048			     args->n_boolean_regs);
4049
4050	if (IS_ERR(regs)) {
4051		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
4052		err = PTR_ERR(regs);
4053		goto reg_err;
4054	}
4055	oa_config->b_counter_regs = regs;
4056
4057	if (INTEL_GEN(perf->i915) < 8) {
4058		if (args->n_flex_regs != 0) {
4059			err = -EINVAL;
4060			goto reg_err;
4061		}
4062	} else {
4063		oa_config->flex_regs_len = args->n_flex_regs;
4064		regs = alloc_oa_regs(perf,
4065				     perf->ops.is_valid_flex_reg,
4066				     u64_to_user_ptr(args->flex_regs_ptr),
4067				     args->n_flex_regs);
4068
4069		if (IS_ERR(regs)) {
4070			DRM_DEBUG("Failed to create OA config for flex_regs\n");
4071			err = PTR_ERR(regs);
4072			goto reg_err;
4073		}
4074		oa_config->flex_regs = regs;
4075	}
4076
4077	err = mutex_lock_interruptible(&perf->metrics_lock);
4078	if (err)
4079		goto reg_err;
4080
4081	/* We shouldn't have too many configs, so this iteration shouldn't be
4082	 * too costly.
4083	 */
4084	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4085		if (!strcmp(tmp->uuid, oa_config->uuid)) {
4086			DRM_DEBUG("OA config already exists with this uuid\n");
4087			err = -EADDRINUSE;
4088			goto sysfs_err;
4089		}
4090	}
4091
4092	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4093	if (err) {
4094		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
4095		goto sysfs_err;
4096	}
4097
4098	/* Config id 0 is invalid; id 1 is reserved for the kernel's test config. */
4099	oa_config->id = idr_alloc(&perf->metrics_idr,
4100				  oa_config, 2,
4101				  0, GFP_KERNEL);
4102	if (oa_config->id < 0) {
4103		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
4104		err = oa_config->id;
4105		goto sysfs_err;
4106	}
4107
4108	mutex_unlock(&perf->metrics_lock);
4109
4110	DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4111
4112	return oa_config->id;
4113
4114sysfs_err:
4115	mutex_unlock(&perf->metrics_lock);
4116reg_err:
4117	i915_oa_config_put(oa_config);
4118	DRM_DEBUG("Failed to add new OA config\n");
4119	return err;
4120}
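
/*
 * A hedged userspace example of adding a config, reusing the mux_regs[]
 * sketch above; the uuid is an arbitrary illustration and must be unique:
 *
 *	struct drm_i915_perf_oa_config config = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = sizeof(mux_regs) / (2 * sizeof(uint32_t)),
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	int metrics_set_id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 */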
4121
4122/**
4123 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4124 * @dev: drm device
4125 * @data: ioctl data (pointer to u64 integer) copied from userspace
4126 * @file: drm file
4127 *
4128 * Configs can be removed while in use; they will stop appearing in sysfs
4129 * and their content will be freed when the stream using the config is closed.
4130 *
4131 * Returns: 0 on success or a negative error code on failure.
4132 */
4133int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4134				  struct drm_file *file)
4135{
4136	struct i915_perf *perf = &to_i915(dev)->perf;
4137	u64 *arg = data;
4138	struct i915_oa_config *oa_config;
4139	int ret;
4140
4141	if (!perf->i915) {
4142		DRM_DEBUG("i915 perf interface not available for this system\n");
4143		return -ENOTSUPP;
4144	}
4145
4146	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4147		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
4148		return -EACCES;
4149	}
4150
4151	ret = mutex_lock_interruptible(&perf->metrics_lock);
4152	if (ret)
4153		return ret;
4154
4155	oa_config = idr_find(&perf->metrics_idr, *arg);
4156	if (!oa_config) {
4157		DRM_DEBUG("Failed to remove unknown OA config\n");
4158		ret = -ENOENT;
4159		goto err_unlock;
4160	}
4161
4162	GEM_BUG_ON(*arg != oa_config->id);
4163
4164	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4165
4166	idr_remove(&perf->metrics_idr, *arg);
4167
4168	mutex_unlock(&perf->metrics_lock);
4169
4170	DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4171
4172	i915_oa_config_put(oa_config);
4173
4174	return 0;
4175
4176err_unlock:
4177	mutex_unlock(&perf->metrics_lock);
4178	return ret;
4179}
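
/*
 * Removal takes a pointer to the u64 config id returned by the add ioctl;
 * a minimal sketch under the same assumptions as above:
 *
 *	uint64_t id = metrics_set_id;
 *	ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &id);
 */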
4180
4181static struct ctl_table oa_table[] = {
4182	{
4183	 .procname = "perf_stream_paranoid",
4184	 .data = &i915_perf_stream_paranoid,
4185	 .maxlen = sizeof(i915_perf_stream_paranoid),
4186	 .mode = 0644,
4187	 .proc_handler = proc_dointvec_minmax,
4188	 .extra1 = SYSCTL_ZERO,
4189	 .extra2 = SYSCTL_ONE,
4190	 },
4191	{
4192	 .procname = "oa_max_sample_rate",
4193	 .data = &i915_oa_max_sample_rate,
4194	 .maxlen = sizeof(i915_oa_max_sample_rate),
4195	 .mode = 0644,
4196	 .proc_handler = proc_dointvec_minmax,
4197	 .extra1 = SYSCTL_ZERO,
4198	 .extra2 = &oa_sample_rate_hard_limit,
4199	 },
4200	{}
4201};
4202
4203static struct ctl_table i915_root[] = {
4204	{
4205	 .procname = "i915",
4206	 .maxlen = 0,
4207	 .mode = 0555,
4208	 .child = oa_table,
4209	 },
4210	{}
4211};
4212
4213static struct ctl_table dev_root[] = {
4214	{
4215	 .procname = "dev",
4216	 .maxlen = 0,
4217	 .mode = 0555,
4218	 .child = i915_root,
4219	 },
4220	{}
4221};
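
/*
 * The tables above surface as /proc/sys/dev/i915/perf_stream_paranoid and
 * /proc/sys/dev/i915/oa_max_sample_rate. A hedged sketch of how an
 * unprivileged tool might check the former before attempting a system-wide
 * open:
 *
 *	int fd = open("/proc/sys/dev/i915/perf_stream_paranoid", O_RDONLY);
 *	char buf[8] = "";
 *	if (fd >= 0 && read(fd, buf, sizeof(buf) - 1) > 0 && buf[0] == '0')
 *		...	// system-wide metrics likely usable without CAP_PERFMON
 */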
4222
4223/**
4224 * i915_perf_init - initialize i915-perf state on module bind
4225 * @i915: i915 device instance
4226 *
4227 * Initializes i915-perf state without exposing anything to userspace.
4228 *
4229 * Note: i915-perf initialization is split into an 'init' and 'register'
4230 * phase with the i915_perf_register() exposing state to userspace.
4231 */
4232void i915_perf_init(struct drm_i915_private *i915)
4233{
4234	struct i915_perf *perf = &i915->perf;
4235
4236	/* XXX const struct i915_perf_ops! */
4237
4238	if (IS_HASWELL(i915)) {
4239		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
4240		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
4241		perf->ops.is_valid_flex_reg = NULL;
4242		perf->ops.enable_metric_set = hsw_enable_metric_set;
4243		perf->ops.disable_metric_set = hsw_disable_metric_set;
4244		perf->ops.oa_enable = gen7_oa_enable;
4245		perf->ops.oa_disable = gen7_oa_disable;
4246		perf->ops.read = gen7_oa_read;
4247		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
4248
4249		perf->oa_formats = hsw_oa_formats;
4250	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
4251		/* Note that although we could theoretically also support the
4252		 * legacy ringbuffer mode on BDW (and earlier iterations of
4253		 * this driver, before upstreaming did this) it didn't seem
4254		 * worth the complexity to maintain now that BDW+ enable
4255		 * execlist mode by default.
4256		 */
4257		perf->ops.read = gen8_oa_read;
4258
4259		if (IS_GEN_RANGE(i915, 8, 9)) {
4260			perf->oa_formats = gen8_plus_oa_formats;
4261
4262			perf->ops.is_valid_b_counter_reg =
4263				gen7_is_valid_b_counter_addr;
4264			perf->ops.is_valid_mux_reg =
4265				gen8_is_valid_mux_addr;
4266			perf->ops.is_valid_flex_reg =
4267				gen8_is_valid_flex_addr;
4268
4269			if (IS_CHERRYVIEW(i915)) {
4270				perf->ops.is_valid_mux_reg =
4271					chv_is_valid_mux_addr;
4272			}
4273
4274			perf->ops.oa_enable = gen8_oa_enable;
4275			perf->ops.oa_disable = gen8_oa_disable;
4276			perf->ops.enable_metric_set = gen8_enable_metric_set;
4277			perf->ops.disable_metric_set = gen8_disable_metric_set;
4278			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4279
4280			if (IS_GEN(i915, 8)) {
4281				perf->ctx_oactxctrl_offset = 0x120;
4282				perf->ctx_flexeu0_offset = 0x2ce;
4283
4284				perf->gen8_valid_ctx_bit = BIT(25);
4285			} else {
4286				perf->ctx_oactxctrl_offset = 0x128;
4287				perf->ctx_flexeu0_offset = 0x3de;
4288
4289				perf->gen8_valid_ctx_bit = BIT(16);
4290			}
4291		} else if (IS_GEN_RANGE(i915, 10, 11)) {
4292			perf->oa_formats = gen8_plus_oa_formats;
4293
4294			perf->ops.is_valid_b_counter_reg =
4295				gen7_is_valid_b_counter_addr;
4296			perf->ops.is_valid_mux_reg =
4297				gen10_is_valid_mux_addr;
4298			perf->ops.is_valid_flex_reg =
4299				gen8_is_valid_flex_addr;
4300
4301			perf->ops.oa_enable = gen8_oa_enable;
4302			perf->ops.oa_disable = gen8_oa_disable;
4303			perf->ops.enable_metric_set = gen8_enable_metric_set;
4304			perf->ops.disable_metric_set = gen10_disable_metric_set;
4305			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4306
4307			if (IS_GEN(i915, 10)) {
4308				perf->ctx_oactxctrl_offset = 0x128;
4309				perf->ctx_flexeu0_offset = 0x3de;
4310			} else {
4311				perf->ctx_oactxctrl_offset = 0x124;
4312				perf->ctx_flexeu0_offset = 0x78e;
4313			}
4314			perf->gen8_valid_ctx_bit = BIT(16);
4315		} else if (IS_GEN(i915, 12)) {
4316			perf->oa_formats = gen12_oa_formats;
4317
4318			perf->ops.is_valid_b_counter_reg =
4319				gen12_is_valid_b_counter_addr;
4320			perf->ops.is_valid_mux_reg =
4321				gen12_is_valid_mux_addr;
4322			perf->ops.is_valid_flex_reg =
4323				gen8_is_valid_flex_addr;
4324
4325			perf->ops.oa_enable = gen12_oa_enable;
4326			perf->ops.oa_disable = gen12_oa_disable;
4327			perf->ops.enable_metric_set = gen12_enable_metric_set;
4328			perf->ops.disable_metric_set = gen12_disable_metric_set;
4329			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
4330
4331			perf->ctx_flexeu0_offset = 0;
4332			perf->ctx_oactxctrl_offset = 0x144;
4333		}
4334	}
4335
4336	if (perf->ops.enable_metric_set) {
4337		mutex_init(&perf->lock);
4338
4339		oa_sample_rate_hard_limit =
4340			RUNTIME_INFO(i915)->cs_timestamp_frequency_hz / 2;
4341
4342		mutex_init(&perf->metrics_lock);
4343		idr_init(&perf->metrics_idr);
4344
4345		/* We set up some ratelimit state to potentially throttle any
4346		 * _NOTES about spurious, invalid OA reports which we don't
4347		 * forward to userspace.
4348		 *
4349		 * We print a _NOTE about any throttling when closing the
4350		 * stream instead of waiting until driver _fini which no one
4351		 * would ever see.
4352		 *
4353		 * Using the same limiting factors as printk_ratelimit()
4354		 */
4355		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
4356		/* Since we use a DRM_NOTE for spurious reports it would be
4357		 * inconsistent to let __ratelimit() automatically print a
4358		 * warning for throttling.
4359		 */
4360		ratelimit_set_flags(&perf->spurious_report_rs,
4361				    RATELIMIT_MSG_ON_RELEASE);
4362
4363		ratelimit_state_init(&perf->tail_pointer_race,
4364				     5 * HZ, 10);
4365		ratelimit_set_flags(&perf->tail_pointer_race,
4366				    RATELIMIT_MSG_ON_RELEASE);
4367
4368		atomic64_set(&perf->noa_programming_delay,
4369			     500 * 1000 /* 500us */);
4370
4371		perf->i915 = i915;
4372	}
4373}
4374
4375static int destroy_config(int id, void *p, void *data)
4376{
4377	i915_oa_config_put(p);
4378	return 0;
4379}
4380
4381void i915_perf_sysctl_register(void)
4382{
4383	sysctl_header = register_sysctl_table(dev_root);
4384}
4385
4386void i915_perf_sysctl_unregister(void)
4387{
4388	unregister_sysctl_table(sysctl_header);
4389}
4390
4391/**
4392 * i915_perf_fini - Counter part to i915_perf_init()
4393 * @i915: i915 device instance
4394 */
4395void i915_perf_fini(struct drm_i915_private *i915)
4396{
4397	struct i915_perf *perf = &i915->perf;
4398
4399	if (!perf->i915)
4400		return;
4401
4402	idr_for_each(&perf->metrics_idr, destroy_config, perf);
4403	idr_destroy(&perf->metrics_idr);
4404
4405	memset(&perf->ops, 0, sizeof(perf->ops));
4406	perf->i915 = NULL;
4407}
4408
4409/**
4410 * i915_perf_ioctl_version - Version of the i915-perf subsystem
4411 *
4412 * This version number is used by userspace to detect available features.
4413 */
4414int i915_perf_ioctl_version(void)
4415{
4416	/*
4417	 * 1: Initial version
4418	 *   I915_PERF_IOCTL_ENABLE
4419	 *   I915_PERF_IOCTL_DISABLE
4420	 *
4421	 * 2: Added runtime modification of OA config.
4422	 *   I915_PERF_IOCTL_CONFIG
4423	 *
4424	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
4425	 *    preemption on a particular context so that performance data is
4426	 *    accessible from a delta of MI_RPC reports without looking at the
4427	 *    OA buffer.
4428	 *
4429	 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
4430	 *    be run for the duration of the performance recording based on
4431	 *    their SSEU configuration.
4432	 *
4433	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
4434	 *    interval for the hrtimer used to check for OA data.
4435	 */
4436	return 5;
4437}
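
/*
 * A hedged sketch of how userspace might read this revision through the
 * I915_PARAM_PERF_REVISION getparam (assuming a kernel new enough to
 * recognise the parameter; an error here implies version 1):
 *
 *	int value = 0, perf_revision = 1;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &value,
 *	};
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		perf_revision = value;
 */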
4438
4439#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4440#include "selftests/i915_perf.c"
4441#endif