/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */
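
/*
 * Illustrative sketch (not part of the driver): a minimal userspace open of
 * an OA metrics stream via DRM_IOCTL_I915_PERF_OPEN. The chosen property
 * values (and the sysfs-advertised metrics_set_id variable) are assumptions
 * picked for the example, not recommendations:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * Sample records can then be read() from stream_fd, as sketched later in
 * this file next to the append_* helpers.
 */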

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration.  For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's current cpu-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. That is to say, we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 *   - As a side note on perf's grouping feature; there was also some concern
 *     that using PERF_FORMAT_GROUP as a way to pack together counter values
 *     would quite drastically inflate our sample sizes, which would likely
 *     lower the effective sampling resolutions we could use when the available
 *     memory bandwidth is limited.
 *
 *     With the OA unit's report formats, counters are packed together as 32
 *     or 40bit values, with the largest report size being 256 bytes.
 *
 *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *     documented ordering to the values, implying PERF_FORMAT_ID must also be
 *     used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time.  The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.  Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seemed to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */
 193
 194#include <linux/anon_inodes.h>
 195#include <linux/sizes.h>
 196#include <linux/uuid.h>
 197
 198#include "gem/i915_gem_context.h"
 199#include "gt/intel_engine_pm.h"
 200#include "gt/intel_engine_user.h"
 201#include "gt/intel_execlists_submission.h"
 202#include "gt/intel_gpu_commands.h"
 203#include "gt/intel_gt.h"
 204#include "gt/intel_gt_clock_utils.h"
 205#include "gt/intel_lrc.h"
 206#include "gt/intel_ring.h"
 207
 208#include "i915_drv.h"
 209#include "i915_perf.h"
 210
 211/* HW requires this to be a power of two, between 128k and 16M, though driver
 212 * is currently generally designed assuming the largest 16M size is used such
 213 * that the overflow cases are unlikely in normal operation.
 214 */
 215#define OA_BUFFER_SIZE		SZ_16M
 216
 217#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
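
/*
 * Example (illustrative): because OA_BUFFER_SIZE is a power of two, the mask
 * in OA_TAKEN makes the subtraction wrap correctly, e.g. with the 16M buffer
 * OA_TAKEN(0x100, 0xffff00) = (0x100 - 0xffff00) & 0xffffff = 0x200.
 */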

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
 * redundant read() attempts.
 *
 * We work around this issue by reading the reports in the OA buffer, starting
 * from the tail reported by the HW, until we find a report with its first 2
 * dwords not 0, meaning its previous report is completely in memory and ready
 * to be read. Those dwords are also set to 0 once read and the whole buffer
 * is cleared upon OA buffer initialization. The first dword is the reason for
 * this report while the second is the timestamp, making it fairly unlikely
 * for both fields to be 0. A more detailed explanation is available in
 * oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports()
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
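
/*
 * A sketch of the relationship (assuming Haswell's 12.5MHz timestamp, i.e.
 * 80ns per tick): the periodic sampling period for a given exponent is
 * 2^(exponent + 1) timestamp ticks, so exponent 0 gives a 160ns period,
 * OA_EXPONENT_MAX (31) gives roughly 2^32 * 80ns ~= 343 seconds, and the
 * hardware maximum of 63 gives the "once per 47 thousand years" noted above.
 */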

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 *                  data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
 * object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	if (hw_tail == stream->oa_buffer.aging_tail &&
	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		/* If the HW tail hasn't moved since the last check and the HW
		 * tail has been aging for long enough, declare it the new
		 * tail.
		 */
		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
	} else {
		u32 head, tail, aged_tail;

		/* NB: The head we observe here might effectively be a little
		 * out of date. If a read() is in progress, the head could be
		 * anywhere between this head and stream->oa_buffer.tail.
		 */
		head = stream->oa_buffer.head - gtt_offset;
		aged_tail = stream->oa_buffer.tail - gtt_offset;

		hw_tail -= gtt_offset;
		tail = hw_tail;

		/* Walk the stream backward until we find a report with dword 0
		 * & 1 not at 0. Since the circular buffer pointers progress by
		 * increments of 64 bytes and reports can be up to 256
		 * bytes long, we can't tell whether a report has fully landed
		 * in memory before the first 2 dwords of the following report
		 * have effectively landed.
		 *
		 * This is assuming that the writes of the OA unit land in
		 * memory in the order they were written.
		 * If not : (╯°□°)╯︵ ┻━┻
		 */
		while (OA_TAKEN(tail, aged_tail) >= report_size) {
			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);

			if (report32[0] != 0 || report32[1] != 0)
				break;

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		if (OA_TAKEN(hw_tail, tail) > report_size &&
		    __ratelimit(&stream->perf->tail_pointer_race))
			DRM_NOTE("unlanded report(s) head=0x%x "
				 "tail=0x%x hw_tail=0x%x\n",
				 head, tail, hw_tail);

		stream->oa_buffer.tail = gtt_offset + tail;
		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
			  stream->oa_buffer.head - gtt_offset) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (copy_to_user(buf, report, report_size))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}
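
/*
 * Illustrative sketch (not driver code): how userspace might walk the
 * records that append_oa_status() and append_oa_sample() produce in its
 * read() buffer; stream_fd is from the open example near the top of this
 * file and process_oa_report() is a hypothetical helper:
 *
 *	uint8_t buf[64 * 1024];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	size_t off = 0;
 *	while (off + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
 *		const struct drm_i915_perf_record_header *h =
 *			(const void *)(buf + off);
 *		if (h->size == 0 || off + h->size > (size_t)len)
 *			break;
 *		if (h->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report((const uint8_t *)(h + 1));
 *		off += h->size;
 *	}
 */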

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (GRAPHICS_VER(stream->perf->i915) == 12 ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    GRAPHICS_VER(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, a buffer overflow in any case likely indicates that
	 * something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	struct i915_gem_ww_ctx ww;
	int err = -ENODEV;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		err = 0;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	if (err)
		return ERR_PTR(err);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 */
	err = intel_context_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return ERR_PTR(err);

	stream->pinned_ctx = ce;
	return stream->pinned_ctx;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	switch (GRAPHICS_VER(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
	case 10:
		if (intel_engine_uses_guc(ce->engine)) {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12 bits are always 0 and dropped
			 * by GuC. They won't be part of the context ID in the
			 * OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id = ce->lrc.lrca >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		} else {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		}
		break;

	case 11:
	case 12: {
		stream->specific_ctx_id_mask =
			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		/*
		 * Pick an unused context id
		 * 0 - BITS_PER_LONG are used by other contexts
		 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
		 */
		stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		break;
	}

	default:
		MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id;

	drm_dbg(&stream->perf->i915->drm,
		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
		stream->specific_ctx_id,
		stream->specific_ctx_id_mask);

	return 0;
}
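
/*
 * Worked example for the Gen11/12 case above (assuming
 * GEN11_SW_CTX_ID_WIDTH = 11 and GEN11_SW_CTX_ID_SHIFT = 37, i.e. a shift
 * of 5 within the upper dword): the mask becomes
 * ((1 << 11) - 1) << 5 = 0xffe0, and the chosen unused id becomes
 * (0x7ff - 1) << 5 = 0xffc0.
 */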

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&stream->pinned_ctx);
	if (ce) {
		ce->tag = 0; /* recomputed on next submission after parking */
		intel_context_unpin(ce);
	}

	stream->specific_ctx_id = INVALID_CTX_ID;
	stream->specific_ctx_id_mask = 0;
}

static void
free_oa_buffer(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
				   I915_VMA_RELEASE_MAP);

	stream->oa_buffer.vaddr = NULL;
}

static void
free_oa_configs(struct i915_perf_stream *stream)
{
	struct i915_oa_config_bo *oa_bo, *tmp;

	i915_oa_config_put(stream->oa_config);
	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
		free_oa_config_bo(oa_bo);
}

static void
free_noa_wait(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->noa_wait, 0);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct i915_perf *perf = stream->perf;

	BUG_ON(stream != perf->exclusive_stream);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 *
	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
	 */
	WRITE_ONCE(perf->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	free_oa_configs(stream);
	free_noa_wait(stream);

	if (perf->spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 perf->spurious_report_rs.missed);
	}
}

static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);

	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
			   gtt_offset | OABUFFER_SIZE_16M);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	stream->perf->gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero, which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}
1436
1437static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
1438{
1439	struct intel_uncore *uncore = stream->uncore;
1440	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1441	unsigned long flags;
1442
1443	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1444
1445	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
1446	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
1447	stream->oa_buffer.head = gtt_offset;
1448
1449	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
1450
1451	/*
1452	 * PRM says:
1453	 *
1454	 *  "This MMIO must be set before the OATAILPTR
1455	 *  register and after the OAHEADPTR register. This is
1456	 *  to enable proper functionality of the overflow
1457	 *  bit."
1458	 */
1459	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
1460		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1461	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1462
1463	/* Mark that we need updated tail pointers to read from... */
1464	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1465	stream->oa_buffer.tail = gtt_offset;
1466
1467	/*
1468	 * Reset state used to recognise context switches, affecting which
1469	 * reports we will forward to userspace while filtering for a single
1470	 * context.
1471	 */
1472	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1473
1474	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1475
1476	/*
1477	 * NB: although the OA buffer will initially be allocated
1478	 * zeroed via shmfs (and so this memset is redundant when
1479	 * first allocating), we may re-init the OA buffer, either
1480	 * when re-enabling a stream or in error/reset paths.
1481	 *
1482	 * The reason we clear the buffer for each re-init is for the
1483	 * sanity check in gen8_append_oa_reports() that looks at the
1484	 * reason field to make sure it's non-zero, which relies on
1485	 * the assumption that new reports are being written to zeroed
1486	 * memory...
1487	 */
1488	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1489}
1490
1491static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
1492{
1493	struct intel_uncore *uncore = stream->uncore;
1494	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1495	unsigned long flags;
1496
1497	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1498
1499	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
1500	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
1501			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
1502	stream->oa_buffer.head = gtt_offset;
1503
1504	/*
1505	 * PRM says:
1506	 *
1507	 *  "This MMIO must be set before the OATAILPTR
1508	 *  register and after the OAHEADPTR register. This is
1509	 *  to enable proper functionality of the overflow
1510	 *  bit."
1511	 */
1512	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
1513			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1514	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
1515			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);
1516
1517	/* Mark that we need updated tail pointers to read from... */
1518	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1519	stream->oa_buffer.tail = gtt_offset;
1520
1521	/*
1522	 * Reset state used to recognise context switches, affecting which
1523	 * reports we will forward to userspace while filtering for a single
1524	 * context.
1525	 */
1526	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1527
1528	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1529
1530	/*
1531	 * NB: although the OA buffer will initially be allocated
1532	 * zeroed via shmfs (and so this memset is redundant when
1533	 * first allocating), we may re-init the OA buffer, either
1534	 * when re-enabling a stream or in error/reset paths.
1535	 *
1536	 * The reason we clear the buffer for each re-init is for the
1537	 * sanity check in gen8_append_oa_reports() that looks at the
1538	 * reason field to make sure it's non-zero, which relies on
1539	 * the assumption that new reports are being written to zeroed
1540	 * memory...
1541	 */
1542	memset(stream->oa_buffer.vaddr, 0,
1543	       stream->oa_buffer.vma->size);
1544}
1545
1546static int alloc_oa_buffer(struct i915_perf_stream *stream)
1547{
1548	struct drm_i915_private *i915 = stream->perf->i915;
1549	struct drm_i915_gem_object *bo;
1550	struct i915_vma *vma;
1551	int ret;
1552
1553	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
1554		return -ENODEV;
1555
1556	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1557	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1558
1559	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
1560	if (IS_ERR(bo)) {
1561		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
1562		return PTR_ERR(bo);
1563	}
1564
1565	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
1566
1567	/* Pre-HSW required 512K alignment, HSW requires 16M */
1568	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
1569	if (IS_ERR(vma)) {
1570		ret = PTR_ERR(vma);
1571		goto err_unref;
1572	}
1573	stream->oa_buffer.vma = vma;
1574
1575	stream->oa_buffer.vaddr =
1576		i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
1577	if (IS_ERR(stream->oa_buffer.vaddr)) {
1578		ret = PTR_ERR(stream->oa_buffer.vaddr);
1579		goto err_unpin;
1580	}
1581
1582	return 0;
1583
1584err_unpin:
1585	__i915_vma_unpin(vma);
1586
1587err_unref:
1588	i915_gem_object_put(bo);
1589
1590	stream->oa_buffer.vaddr = NULL;
1591	stream->oa_buffer.vma = NULL;
1592
1593	return ret;
1594}
1595
1596static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
1597				  bool save, i915_reg_t reg, u32 offset,
1598				  u32 dword_count)
1599{
1600	u32 cmd;
1601	u32 d;
1602
1603	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
1604	cmd |= MI_SRM_LRM_GLOBAL_GTT;
1605	if (GRAPHICS_VER(stream->perf->i915) >= 8)
1606		cmd++;
1607
1608	for (d = 0; d < dword_count; d++) {
1609		*cs++ = cmd;
1610		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
1611		*cs++ = intel_gt_scratch_offset(stream->engine->gt,
1612						offset) + 4 * d;
1613		*cs++ = 0;
1614	}
1615
1616	return cs;
1617}
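
/*
 * Illustrative sketch of the emitted packets (not part of the driver):
 * on gen8+, saving one 64-bit GPR (dword_count == 2) expands into two
 * 4-dword MI_STORE_REGISTER_MEM packets:
 *
 *   cs[0] = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT, plus 1
 *           on the length field for the 64-bit address
 *   cs[1] = i915_mmio_reg_offset(CS_GPR(i))      lower register dword
 *   cs[2] = scratch GGTT offset                  store address, low
 *   cs[3] = 0                                    store address, high
 *
 * and the same again with both offsets advanced by 4 for the upper
 * register dword. Pre-gen8 the command is one dword shorter, so the
 * trailing 0 simply decodes as MI_NOOP.
 */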
1618
1619static int alloc_noa_wait(struct i915_perf_stream *stream)
1620{
1621	struct drm_i915_private *i915 = stream->perf->i915;
1622	struct drm_i915_gem_object *bo;
1623	struct i915_vma *vma;
1624	const u64 delay_ticks = 0xffffffffffffffff -
1625		intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
1626					      atomic64_read(&stream->perf->noa_programming_delay));
1627	const u32 base = stream->engine->mmio_base;
1628#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1629	u32 *batch, *ts0, *cs, *jump;
1630	struct i915_gem_ww_ctx ww;
1631	int ret, i;
1632	enum {
1633		START_TS,
1634		NOW_TS,
1635		DELTA_TS,
1636		JUMP_PREDICATE,
1637		DELTA_TARGET,
1638		N_CS_GPR
1639	};
1640
1641	bo = i915_gem_object_create_internal(i915, 4096);
1642	if (IS_ERR(bo)) {
1643		drm_err(&i915->drm,
1644			"Failed to allocate NOA wait batchbuffer\n");
1645		return PTR_ERR(bo);
1646	}
1647
1648	i915_gem_ww_ctx_init(&ww, true);
1649retry:
1650	ret = i915_gem_object_lock(bo, &ww);
1651	if (ret)
1652		goto out_ww;
1653
1654	/*
1655	 * We pin the vma in GGTT because multiple OA config BOs will
1656	 * jump to this address and it needs to stay fixed for the
1657	 * lifetime of the i915/perf stream.
1658	 */
1659	vma = i915_gem_object_ggtt_pin_ww(bo, &ww, NULL, 0, 0, PIN_HIGH);
1660	if (IS_ERR(vma)) {
1661		ret = PTR_ERR(vma);
1662		goto out_ww;
1663	}
1664
1665	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
1666	if (IS_ERR(batch)) {
1667		ret = PTR_ERR(batch);
1668		goto err_unpin;
1669	}
1670
1671	/* Save registers. */
1672	for (i = 0; i < N_CS_GPR; i++)
1673		cs = save_restore_register(
1674			stream, cs, true /* save */, CS_GPR(i),
1675			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1676	cs = save_restore_register(
1677		stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
1678		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1679
1680	/* First timestamp snapshot location. */
1681	ts0 = cs;
1682
1683	/*
1684	 * Initial snapshot of the timestamp register to implement the wait.
1685	 * We work with 32-bit values, so clear the top 32 bits of the
1686	 * register because the ALU operates on 64-bit values.
1687	 */
1688	*cs++ = MI_LOAD_REGISTER_IMM(1);
1689	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
1690	*cs++ = 0;
1691	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1692	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1693	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
1694
1695	/*
1696	 * This is the location we're going to jump back into until the
1697	 * required amount of time has passed.
1698	 */
1699	jump = cs;
1700
1701	/*
1702	 * Take another snapshot of the timestamp register. Take care to
1703	 * clear the top 32 bits of CS_GPR(NOW_TS) as we're using it for
1704	 * other operations below.
1705	 */
1706	*cs++ = MI_LOAD_REGISTER_IMM(1);
1707	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
1708	*cs++ = 0;
1709	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1710	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1711	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
1712
1713	/*
1714	 * Compute the difference between the two timestamps and store the
1715	 * result in CS_GPR(DELTA_TS).
1716	 */
1717	*cs++ = MI_MATH(5);
1718	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
1719	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
1720	*cs++ = MI_MATH_SUB;
1721	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
1722	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1723
1724	/*
1725	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
1726	 * timestamp has rolled over the 32 bits) into the predicate register
1727	 * to be used for the predicated jump.
1728	 */
1729	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1730	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1731	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1732
1733	/* Restart from the beginning if we had timestamps roll over. */
1734	*cs++ = (GRAPHICS_VER(i915) < 8 ?
1735		 MI_BATCH_BUFFER_START :
1736		 MI_BATCH_BUFFER_START_GEN8) |
1737		MI_BATCH_PREDICATE;
1738	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
1739	*cs++ = 0;
1740
1741	/*
1742	 * Now take the diff between the two previous timestamps and add it to:
1743	 *      ((1 << 64) - 1) - delay_ticks
1744	 *
1745	 * When the Carry Flag contains 1 this means the elapsed time is
1746	 * longer than the expected delay, and we can exit the wait loop.
1747	 */
1748	*cs++ = MI_LOAD_REGISTER_IMM(2);
1749	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
1750	*cs++ = lower_32_bits(delay_ticks);
1751	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
1752	*cs++ = upper_32_bits(delay_ticks);
1753
1754	*cs++ = MI_MATH(4);
1755	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
1756	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
1757	*cs++ = MI_MATH_ADD;
1758	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1759
1760	*cs++ = MI_ARB_CHECK;
1761
1762	/*
1763	 * Transfer the result into the predicate register to be used for the
1764	 * predicated jump.
1765	 */
1766	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1767	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1768	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1769
1770	/* Predicate the jump.  */
1771	*cs++ = (GRAPHICS_VER(i915) < 8 ?
1772		 MI_BATCH_BUFFER_START :
1773		 MI_BATCH_BUFFER_START_GEN8) |
1774		MI_BATCH_PREDICATE;
1775	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
1776	*cs++ = 0;
1777
1778	/* Restore registers. */
1779	for (i = 0; i < N_CS_GPR; i++)
1780		cs = save_restore_register(
1781			stream, cs, false /* restore */, CS_GPR(i),
1782			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1783	cs = save_restore_register(
1784		stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
1785		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1786
1787	/* And return to the ring. */
1788	*cs++ = MI_BATCH_BUFFER_END;
1789
1790	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
1791
1792	i915_gem_object_flush_map(bo);
1793	__i915_gem_object_release_map(bo);
1794
1795	stream->noa_wait = vma;
1796	goto out_ww;
1797
1798err_unpin:
1799	i915_vma_unpin_and_release(&vma, 0);
1800out_ww:
1801	if (ret == -EDEADLK) {
1802		ret = i915_gem_ww_ctx_backoff(&ww);
1803		if (!ret)
1804			goto retry;
1805	}
1806	i915_gem_ww_ctx_fini(&ww);
1807	if (ret)
1808		i915_gem_object_put(bo);
1809	return ret;
1810}
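
/*
 * The wrap-around trick above can be sanity checked with a small
 * standalone C sketch (illustrative, not driver code): adding the
 * elapsed ticks to delay_ticks == ((1 << 64) - 1) - target only
 * carries out of 64 bits once more than `target` ticks have elapsed:
 *
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           uint64_t target = 2000, delta;
 *           uint64_t magic = ~0ull - target;
 *
 *           for (delta = 1999; delta <= 2001; delta++)
 *                   printf("delta=%llu exit_wait=%d\n",
 *                          (unsigned long long)delta,
 *                          delta + magic < magic);
 *           return 0;
 *   }
 *
 * This prints exit_wait=1 only for delta == 2001, matching the
 * MI_MATH carry + MI_MATH_STOREINV logic used in the batch.
 */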
1811
1812static u32 *write_cs_mi_lri(u32 *cs,
1813			    const struct i915_oa_reg *reg_data,
1814			    u32 n_regs)
1815{
1816	u32 i;
1817
1818	for (i = 0; i < n_regs; i++) {
1819		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
1820			u32 n_lri = min_t(u32,
1821					  n_regs - i,
1822					  MI_LOAD_REGISTER_IMM_MAX_REGS);
1823
1824			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
1825		}
1826		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
1827		*cs++ = reg_data[i].value;
1828	}
1829
1830	return cs;
1831}
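
/*
 * Sketch of the command stream this emits for n_regs == 3
 * (illustrative):
 *
 *   MI_LOAD_REGISTER_IMM(3)
 *   offset of reg 0, value for reg 0
 *   offset of reg 1, value for reg 1
 *   offset of reg 2, value for reg 2
 *
 * A fresh MI_LRI header is emitted after every
 * MI_LOAD_REGISTER_IMM_MAX_REGS registers since a single header can
 * only describe that many (offset, value) pairs.
 */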
1832
1833static int num_lri_dwords(int num_regs)
1834{
1835	int count = 0;
1836
1837	if (num_regs > 0) {
1838		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
1839		count += num_regs * 2;
1840	}
1841
1842	return count;
1843}
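
/*
 * Worked example (assuming MI_LOAD_REGISTER_IMM_MAX_REGS == 126, its
 * current definition): 130 registers cost DIV_ROUND_UP(130, 126) == 2
 * MI_LRI headers plus 130 * 2 == 260 (offset, value) dwords, for 262
 * dwords in total.
 */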
1844
1845static struct i915_oa_config_bo *
1846alloc_oa_config_buffer(struct i915_perf_stream *stream,
1847		       struct i915_oa_config *oa_config)
1848{
1849	struct drm_i915_gem_object *obj;
1850	struct i915_oa_config_bo *oa_bo;
1851	struct i915_gem_ww_ctx ww;
1852	size_t config_length = 0;
1853	u32 *cs;
1854	int err;
1855
1856	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
1857	if (!oa_bo)
1858		return ERR_PTR(-ENOMEM);
1859
1860	config_length += num_lri_dwords(oa_config->mux_regs_len);
1861	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
1862	config_length += num_lri_dwords(oa_config->flex_regs_len);
1863	config_length += 3; /* MI_BATCH_BUFFER_START */
1864	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
1865
1866	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
1867	if (IS_ERR(obj)) {
1868		err = PTR_ERR(obj);
1869		goto err_free;
1870	}
1871
1872	i915_gem_ww_ctx_init(&ww, true);
1873retry:
1874	err = i915_gem_object_lock(obj, &ww);
1875	if (err)
1876		goto out_ww;
1877
1878	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
1879	if (IS_ERR(cs)) {
1880		err = PTR_ERR(cs);
1881		goto out_ww;
1882	}
1883
1884	cs = write_cs_mi_lri(cs,
1885			     oa_config->mux_regs,
1886			     oa_config->mux_regs_len);
1887	cs = write_cs_mi_lri(cs,
1888			     oa_config->b_counter_regs,
1889			     oa_config->b_counter_regs_len);
1890	cs = write_cs_mi_lri(cs,
1891			     oa_config->flex_regs,
1892			     oa_config->flex_regs_len);
1893
1894	/* Jump into the active wait. */
1895	*cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
1896		 MI_BATCH_BUFFER_START :
1897		 MI_BATCH_BUFFER_START_GEN8);
1898	*cs++ = i915_ggtt_offset(stream->noa_wait);
1899	*cs++ = 0;
1900
1901	i915_gem_object_flush_map(obj);
1902	__i915_gem_object_release_map(obj);
1903
1904	oa_bo->vma = i915_vma_instance(obj,
1905				       &stream->engine->gt->ggtt->vm,
1906				       NULL);
1907	if (IS_ERR(oa_bo->vma)) {
1908		err = PTR_ERR(oa_bo->vma);
1909		goto out_ww;
1910	}
1911
1912	oa_bo->oa_config = i915_oa_config_get(oa_config);
1913	llist_add(&oa_bo->node, &stream->oa_config_bos);
1914
1915out_ww:
1916	if (err == -EDEADLK) {
1917		err = i915_gem_ww_ctx_backoff(&ww);
1918		if (!err)
1919			goto retry;
1920	}
1921	i915_gem_ww_ctx_fini(&ww);
1922
1923	if (err)
1924		i915_gem_object_put(obj);
1925err_free:
1926	if (err) {
1927		kfree(oa_bo);
1928		return ERR_PTR(err);
1929	}
1930	return oa_bo;
1931}
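
/*
 * The lock/retry dance above (also used by alloc_noa_wait() and
 * emit_oa_config()) is the usual i915 ww-mutex pattern; a simplified
 * sketch, where do_locked_work() stands in for the pin/map/write
 * steps:
 *
 *   i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *   err = i915_gem_object_lock(obj, &ww);
 *   if (!err)
 *           err = do_locked_work(obj);
 *   if (err == -EDEADLK) {
 *           err = i915_gem_ww_ctx_backoff(&ww);
 *           if (!err)
 *                   goto retry;
 *   }
 *   i915_gem_ww_ctx_fini(&ww);
 */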
1932
1933static struct i915_vma *
1934get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
1935{
1936	struct i915_oa_config_bo *oa_bo;
1937
1938	/*
1939	 * Look for the buffer in the already allocated BOs attached
1940	 * to the stream.
1941	 */
1942	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
1943		if (oa_bo->oa_config == oa_config &&
1944		    memcmp(oa_bo->oa_config->uuid,
1945			   oa_config->uuid,
1946			   sizeof(oa_config->uuid)) == 0)
1947			goto out;
1948	}
1949
1950	oa_bo = alloc_oa_config_buffer(stream, oa_config);
1951	if (IS_ERR(oa_bo))
1952		return ERR_CAST(oa_bo);
1953
1954out:
1955	return i915_vma_get(oa_bo->vma);
1956}
1957
1958static int
1959emit_oa_config(struct i915_perf_stream *stream,
1960	       struct i915_oa_config *oa_config,
1961	       struct intel_context *ce,
1962	       struct i915_active *active)
1963{
1964	struct i915_request *rq;
1965	struct i915_vma *vma;
1966	struct i915_gem_ww_ctx ww;
1967	int err;
1968
1969	vma = get_oa_vma(stream, oa_config);
1970	if (IS_ERR(vma))
1971		return PTR_ERR(vma);
1972
1973	i915_gem_ww_ctx_init(&ww, true);
1974retry:
1975	err = i915_gem_object_lock(vma->obj, &ww);
1976	if (err)
1977		goto err;
1978
1979	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
1980	if (err)
1981		goto err;
1982
1983	intel_engine_pm_get(ce->engine);
1984	rq = i915_request_create(ce);
1985	intel_engine_pm_put(ce->engine);
1986	if (IS_ERR(rq)) {
1987		err = PTR_ERR(rq);
1988		goto err_vma_unpin;
1989	}
1990
1991	if (!IS_ERR_OR_NULL(active)) {
1992		/* After all individual context modifications */
1993		err = i915_request_await_active(rq, active,
1994						I915_ACTIVE_AWAIT_ACTIVE);
1995		if (err)
1996			goto err_add_request;
1997
1998		err = i915_active_add_request(active, rq);
1999		if (err)
2000			goto err_add_request;
2001	}
2002
2003	err = i915_request_await_object(rq, vma->obj, 0);
2004	if (!err)
2005		err = i915_vma_move_to_active(vma, rq, 0);
2006	if (err)
2007		goto err_add_request;
2008
2009	err = rq->engine->emit_bb_start(rq,
2010					vma->node.start, 0,
2011					I915_DISPATCH_SECURE);
2012	if (err)
2013		goto err_add_request;
2014
2015err_add_request:
2016	i915_request_add(rq);
2017err_vma_unpin:
2018	i915_vma_unpin(vma);
2019err:
2020	if (err == -EDEADLK) {
2021		err = i915_gem_ww_ctx_backoff(&ww);
2022		if (!err)
2023			goto retry;
2024	}
2025
2026	i915_gem_ww_ctx_fini(&ww);
2027	i915_vma_put(vma);
2028	return err;
2029}
2030
2031static struct intel_context *oa_context(struct i915_perf_stream *stream)
2032{
2033	return stream->pinned_ctx ?: stream->engine->kernel_context;
2034}
2035
2036static int
2037hsw_enable_metric_set(struct i915_perf_stream *stream,
2038		      struct i915_active *active)
2039{
2040	struct intel_uncore *uncore = stream->uncore;
2041
2042	/*
2043	 * PRM:
2044	 *
2045	 * OA unit is using “crclk” for its functionality. When trunk
2046	 * level clock gating takes place, OA clock would be gated,
2047	 * unable to count the events from non-render clock domain.
2048	 * Render clock gating must be disabled when OA is enabled to
2049	 * count the events from non-render domain. Unit level clock
2050	 * gating for RCS should also be disabled.
2051	 */
2052	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2053			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
2054	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2055			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2056
2057	return emit_oa_config(stream,
2058			      stream->oa_config, oa_context(stream),
2059			      active);
2060}
2061
2062static void hsw_disable_metric_set(struct i915_perf_stream *stream)
2063{
2064	struct intel_uncore *uncore = stream->uncore;
2065
2066	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2067			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
2068	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2069			 0, GEN7_DOP_CLOCK_GATE_ENABLE);
2070
2071	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2072}
2073
2074static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
2075			      i915_reg_t reg)
2076{
2077	u32 mmio = i915_mmio_reg_offset(reg);
2078	int i;
2079
2080	/*
2081	 * This arbitrary default will select the 'EU FPU0 Pipeline
2082	 * Active' event. In the future it's anticipated that there
2083	 * will be an explicit 'No Event' we can select, but not yet...
2084	 */
2085	if (!oa_config)
2086		return 0;
2087
2088	for (i = 0; i < oa_config->flex_regs_len; i++) {
2089		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
2090			return oa_config->flex_regs[i].value;
2091	}
2092
2093	return 0;
2094}
2095/*
2096 * NB: It must always remain pointer safe to run this even if the OA unit
2097 * has been disabled.
2098 *
2099 * It's fine to put out-of-date values into these per-context registers
2100 * in the case that the OA unit has been disabled.
2101 */
2102static void
2103gen8_update_reg_state_unlocked(const struct intel_context *ce,
2104			       const struct i915_perf_stream *stream)
2105{
2106	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2107	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2108	/* The MMIO offsets for Flex EU registers aren't contiguous */
2109	i915_reg_t flex_regs[] = {
2110		EU_PERF_CNTL0,
2111		EU_PERF_CNTL1,
2112		EU_PERF_CNTL2,
2113		EU_PERF_CNTL3,
2114		EU_PERF_CNTL4,
2115		EU_PERF_CNTL5,
2116		EU_PERF_CNTL6,
2117	};
2118	u32 *reg_state = ce->lrc_reg_state;
2119	int i;
2120
2121	reg_state[ctx_oactxctrl + 1] =
2122		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2123		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2124		GEN8_OA_COUNTER_RESUME;
2125
2126	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
2127		reg_state[ctx_flexeu0 + i * 2 + 1] =
2128			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
2129}
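
/*
 * Note on the "+ 1" indexing above (comment added for clarity): the
 * logical ring context image stores registers as (MMIO offset, value)
 * dword pairs, so reg_state[n] holds a register offset and
 * reg_state[n + 1] the value the hardware will load, e.g.:
 *
 *   reg_state[ctx_flexeu0 + i * 2]     == offset of EU_PERF_CNTL<i>
 *   reg_state[ctx_flexeu0 + i * 2 + 1] == counter select value
 */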
2130
2131struct flex {
2132	i915_reg_t reg;
2133	u32 offset;
2134	u32 value;
2135};
2136
2137static int
2138gen8_store_flex(struct i915_request *rq,
2139		struct intel_context *ce,
2140		const struct flex *flex, unsigned int count)
2141{
2142	u32 offset;
2143	u32 *cs;
2144
2145	cs = intel_ring_begin(rq, 4 * count);
2146	if (IS_ERR(cs))
2147		return PTR_ERR(cs);
2148
2149	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
2150	do {
2151		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
2152		*cs++ = offset + flex->offset * sizeof(u32);
2153		*cs++ = 0;
2154		*cs++ = flex->value;
2155	} while (flex++, --count);
2156
2157	intel_ring_advance(rq, cs);
2158
2159	return 0;
2160}
2161
2162static int
2163gen8_load_flex(struct i915_request *rq,
2164	       struct intel_context *ce,
2165	       const struct flex *flex, unsigned int count)
2166{
2167	u32 *cs;
2168
2169	GEM_BUG_ON(!count || count > 63);
2170
2171	cs = intel_ring_begin(rq, 2 * count + 2);
2172	if (IS_ERR(cs))
2173		return PTR_ERR(cs);
2174
2175	*cs++ = MI_LOAD_REGISTER_IMM(count);
2176	do {
2177		*cs++ = i915_mmio_reg_offset(flex->reg);
2178		*cs++ = flex->value;
2179	} while (flex++, --count);
2180	*cs++ = MI_NOOP;
2181
2182	intel_ring_advance(rq, cs);
2183
2184	return 0;
2185}
2186
2187static int gen8_modify_context(struct intel_context *ce,
2188			       const struct flex *flex, unsigned int count)
2189{
2190	struct i915_request *rq;
2191	int err;
2192
2193	rq = intel_engine_create_kernel_request(ce->engine);
2194	if (IS_ERR(rq))
2195		return PTR_ERR(rq);
2196
2197	/* Serialise with the remote context */
2198	err = intel_context_prepare_remote_request(ce, rq);
2199	if (err == 0)
2200		err = gen8_store_flex(rq, ce, flex, count);
2201
2202	i915_request_add(rq);
2203	return err;
2204}
2205
2206static int
2207gen8_modify_self(struct intel_context *ce,
2208		 const struct flex *flex, unsigned int count,
2209		 struct i915_active *active)
2210{
2211	struct i915_request *rq;
2212	int err;
2213
2214	intel_engine_pm_get(ce->engine);
2215	rq = i915_request_create(ce);
2216	intel_engine_pm_put(ce->engine);
2217	if (IS_ERR(rq))
2218		return PTR_ERR(rq);
2219
2220	if (!IS_ERR_OR_NULL(active)) {
2221		err = i915_active_add_request(active, rq);
2222		if (err)
2223			goto err_add_request;
2224	}
2225
2226	err = gen8_load_flex(rq, ce, flex, count);
2227	if (err)
2228		goto err_add_request;
2229
2230err_add_request:
2231	i915_request_add(rq);
2232	return err;
2233}
2234
2235static int gen8_configure_context(struct i915_gem_context *ctx,
2236				  struct flex *flex, unsigned int count)
2237{
2238	struct i915_gem_engines_iter it;
2239	struct intel_context *ce;
2240	int err = 0;
2241
2242	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2243		GEM_BUG_ON(ce == ce->engine->kernel_context);
2244
2245		if (ce->engine->class != RENDER_CLASS)
2246			continue;
2247
2248		/* Otherwise OA settings will be set upon first use */
2249		if (!intel_context_pin_if_active(ce))
2250			continue;
2251
2252		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
2253		err = gen8_modify_context(ce, flex, count);
2254
2255		intel_context_unpin(ce);
2256		if (err)
2257			break;
2258	}
2259	i915_gem_context_unlock_engines(ctx);
2260
2261	return err;
2262}
2263
2264static int gen12_configure_oar_context(struct i915_perf_stream *stream,
2265				       struct i915_active *active)
2266{
2267	int err;
2268	struct intel_context *ce = stream->pinned_ctx;
2269	u32 format = stream->oa_buffer.format;
2270	struct flex regs_context[] = {
2271		{
2272			GEN8_OACTXCONTROL,
2273			stream->perf->ctx_oactxctrl_offset + 1,
2274			active ? GEN8_OA_COUNTER_RESUME : 0,
2275		},
2276	};
2277	/* Offsets in regs_lri are not used since this configuration is only
2278	 * applied using LRI. Initialize the correct offsets for posterity.
2279	 */
2280#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2281	struct flex regs_lri[] = {
2282		{
2283			GEN12_OAR_OACONTROL,
2284			GEN12_OAR_OACONTROL_OFFSET + 1,
2285			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2286			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2287		},
2288		{
2289			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2290			CTX_CONTEXT_CONTROL,
2291			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2292				      active ?
2293				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2294				      0)
2295		},
2296	};
2297
2298	/* Modify the context image of the pinned context with regs_context. */
2299	err = intel_context_lock_pinned(ce);
2300	if (err)
2301		return err;
2302
2303	err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
2304	intel_context_unlock_pinned(ce);
2305	if (err)
2306		return err;
2307
2308	/* Apply regs_lri using LRI with pinned context */
2309	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
2310}
2311
2312/*
2313 * Manages updating the per-context aspects of the OA stream
2314 * configuration across all contexts.
2315 *
2316 * The awkward consideration here is that OACTXCONTROL controls the
2317 * exponent for periodic sampling which is primarily used for system
2318 * wide profiling where we'd like a consistent sampling period even in
2319 * the face of context switches.
2320 *
2321 * Our approach of updating the register state context (as opposed to
2322 * say using a workaround batch buffer) ensures that the hardware
2323 * won't automatically reload an out-of-date timer exponent even
2324 * transiently before a WA BB could be parsed.
2325 *
2326 * This function needs to:
2327 * - Ensure the currently running context's per-context OA state is
2328 *   updated
2329 * - Ensure that all existing contexts will have the correct per-context
2330 *   OA state if they are scheduled for use.
2331 * - Ensure any new contexts will be initialized with the correct
2332 *   per-context OA state.
2333 *
2334 * Note: it's only the RCS/Render context that has any OA state.
2335 * Note: the first flex register passed must always be R_PWR_CLK_STATE
2336 */
2337static int
2338oa_configure_all_contexts(struct i915_perf_stream *stream,
2339			  struct flex *regs,
2340			  size_t num_regs,
2341			  struct i915_active *active)
2342{
2343	struct drm_i915_private *i915 = stream->perf->i915;
2344	struct intel_engine_cs *engine;
2345	struct i915_gem_context *ctx, *cn;
2346	int err;
2347
2348	lockdep_assert_held(&stream->perf->lock);
2349
2350	/*
2351	 * The OA register config is setup through the context image. This image
2352	 * might be written to by the GPU on context switch (in particular on
2353	 * lite-restore). This means we can't safely update a context's image,
2354	 * if this context is scheduled/submitted to run on the GPU.
2355	 *
2356	 * We could emit the OA register config through the batch buffer but
2357	 * this might leave a small interval of time where the OA unit is
2358	 * configured at an invalid sampling period.
2359	 *
2360	 * Note that since we emit all requests from a single ring, there
2361	 * is still an implicit global barrier here that may cause a high
2362	 * priority context to wait for an otherwise independent low priority
2363	 * context. Contexts idle at the time of reconfiguration are not
2364	 * trapped behind the barrier.
2365	 */
2366	spin_lock(&i915->gem.contexts.lock);
2367	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
2368		if (!kref_get_unless_zero(&ctx->ref))
2369			continue;
2370
2371		spin_unlock(&i915->gem.contexts.lock);
2372
2373		err = gen8_configure_context(ctx, regs, num_regs);
2374		if (err) {
2375			i915_gem_context_put(ctx);
2376			return err;
2377		}
2378
2379		spin_lock(&i915->gem.contexts.lock);
2380		list_safe_reset_next(ctx, cn, link);
2381		i915_gem_context_put(ctx);
2382	}
2383	spin_unlock(&i915->gem.contexts.lock);
2384
2385	/*
2386	 * After updating all other contexts, we need to modify ourselves.
2387	 * If we don't modify the kernel_context, we do not get events while
2388	 * idle.
2389	 */
2390	for_each_uabi_engine(engine, i915) {
2391		struct intel_context *ce = engine->kernel_context;
2392
2393		if (engine->class != RENDER_CLASS)
2394			continue;
2395
2396		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
2397
2398		err = gen8_modify_self(ce, regs, num_regs, active);
2399		if (err)
2400			return err;
2401	}
2402
2403	return 0;
2404}
2405
2406static int
2407gen12_configure_all_contexts(struct i915_perf_stream *stream,
2408			     const struct i915_oa_config *oa_config,
2409			     struct i915_active *active)
2410{
2411	struct flex regs[] = {
2412		{
2413			GEN8_R_PWR_CLK_STATE,
2414			CTX_R_PWR_CLK_STATE,
2415		},
2416	};
2417
2418	return oa_configure_all_contexts(stream,
2419					 regs, ARRAY_SIZE(regs),
2420					 active);
2421}
2422
2423static int
2424lrc_configure_all_contexts(struct i915_perf_stream *stream,
2425			   const struct i915_oa_config *oa_config,
2426			   struct i915_active *active)
2427{
2428	/* The MMIO offsets for Flex EU registers aren't contiguous */
2429	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2430#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
2431	struct flex regs[] = {
2432		{
2433			GEN8_R_PWR_CLK_STATE,
2434			CTX_R_PWR_CLK_STATE,
2435		},
2436		{
2437			GEN8_OACTXCONTROL,
2438			stream->perf->ctx_oactxctrl_offset + 1,
2439		},
2440		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
2441		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
2442		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
2443		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
2444		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
2445		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
2446		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
2447	};
2448#undef ctx_flexeuN
2449	int i;
2450
2451	regs[1].value =
2452		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2453		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2454		GEN8_OA_COUNTER_RESUME;
2455
2456	for (i = 2; i < ARRAY_SIZE(regs); i++)
2457		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2458
2459	return oa_configure_all_contexts(stream,
2460					 regs, ARRAY_SIZE(regs),
2461					 active);
2462}
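
/*
 * Example of the value packed into regs[1] above (illustrative): with
 * periodic sampling enabled and period_exponent == 5 it becomes
 *
 *   (5 << GEN8_OA_TIMER_PERIOD_SHIFT) | GEN8_OA_TIMER_ENABLE |
 *   GEN8_OA_COUNTER_RESUME
 *
 * i.e. a sampling period of 2^(5 + 1) == 64 timestamp ticks.
 */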
2463
2464static int
2465gen8_enable_metric_set(struct i915_perf_stream *stream,
2466		       struct i915_active *active)
2467{
2468	struct intel_uncore *uncore = stream->uncore;
2469	struct i915_oa_config *oa_config = stream->oa_config;
2470	int ret;
2471
2472	/*
2473	 * We disable slice/unslice clock ratio change reports on SKL since
2474	 * they are too noisy. The HW generates a lot of redundant reports
2475	 * where the ratio hasn't really changed causing a lot of redundant
2476	 * work to processes and increasing the chances we'll hit buffer
2477	 * overruns.
2478	 *
2479	 * Although we don't currently use the 'disable overrun' OABUFFER
2480	 * feature it's worth noting that clock ratio reports have to be
2481	 * disabled before considering to use that feature since the HW doesn't
2482	 * correctly block these reports.
2483	 *
2484	 * Currently none of the high-level metrics we have depend on knowing
2485	 * this ratio to normalize.
2486	 *
2487	 * Note: This register is not power context saved and restored, but
2488	 * that's OK considering that we disable RC6 while the OA unit is
2489	 * enabled.
2490	 *
2491	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2492	 * be read back from automatically triggered reports, as part of the
2493	 * RPT_ID field.
2494	 */
2495	if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
2496		intel_uncore_write(uncore, GEN8_OA_DEBUG,
2497				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2498						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2499	}
2500
2501	/*
2502	 * Update all contexts prior to writing the mux configurations as we need
2503	 * to make sure all slices/subslices are ON before writing to NOA
2504	 * registers.
2505	 */
2506	ret = lrc_configure_all_contexts(stream, oa_config, active);
2507	if (ret)
2508		return ret;
2509
2510	return emit_oa_config(stream,
2511			      stream->oa_config, oa_context(stream),
2512			      active);
2513}
2514
2515static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2516{
2517	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2518			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
2519			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2520}
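
/*
 * For reference (illustrative): GEN12_OAG_OA_DEBUG is a masked
 * register where the top 16 bits select which of the bottom 16 bits a
 * write actually updates. _MASKED_FIELD(mask, value) expands to
 * roughly ((mask) << 16 | (value)), so the helper above always arms
 * the DISABLE_CTX_SWITCH_REPORTS bit for writing and sets it only
 * when no OA reports were requested.
 */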
2521
2522static int
2523gen12_enable_metric_set(struct i915_perf_stream *stream,
2524			struct i915_active *active)
2525{
2526	struct intel_uncore *uncore = stream->uncore;
2527	struct i915_oa_config *oa_config = stream->oa_config;
2528	bool periodic = stream->periodic;
2529	u32 period_exponent = stream->period_exponent;
2530	int ret;
2531
2532	intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
2533			   /* Disable clk ratio reports, like previous Gens. */
2534			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2535					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2536			   /*
2537			    * If the user didn't require OA reports, instruct
2538			    * the hardware not to emit ctx switch reports.
2539			    */
2540			   oag_report_ctx_switches(stream));
2541
2542	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
2543			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2544			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2545			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2546			    : 0);
2547
2548	/*
2549	 * Update all contexts prior to writing the mux configurations as we need
2550	 * to make sure all slices/subslices are ON before writing to NOA
2551	 * registers.
2552	 */
2553	ret = gen12_configure_all_contexts(stream, oa_config, active);
2554	if (ret)
2555		return ret;
2556
2557	/*
2558	 * For Gen12, performance counters are context
2559	 * saved/restored. Only enable it for the context that
2560	 * requested this.
2561	 */
2562	if (stream->ctx) {
2563		ret = gen12_configure_oar_context(stream, active);
2564		if (ret)
2565			return ret;
2566	}
2567
2568	return emit_oa_config(stream,
2569			      stream->oa_config, oa_context(stream),
2570			      active);
2571}
2572
2573static void gen8_disable_metric_set(struct i915_perf_stream *stream)
2574{
2575	struct intel_uncore *uncore = stream->uncore;
2576
2577	/* Reset all contexts' slices/subslices configurations. */
2578	lrc_configure_all_contexts(stream, NULL, NULL);
2579
2580	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2581}
2582
2583static void gen10_disable_metric_set(struct i915_perf_stream *stream)
2584{
2585	struct intel_uncore *uncore = stream->uncore;
2586
2587	/* Reset all contexts' slices/subslices configurations. */
2588	lrc_configure_all_contexts(stream, NULL, NULL);
2589
2590	/* Make sure we disable noa to save power. */
2591	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2592}
2593
2594static void gen12_disable_metric_set(struct i915_perf_stream *stream)
2595{
2596	struct intel_uncore *uncore = stream->uncore;
2597
2598	/* Reset all contexts' slices/subslices configurations. */
2599	gen12_configure_all_contexts(stream, NULL, NULL);
2600
2601	/* disable the context save/restore of OAR counters */
2602	if (stream->ctx)
2603		gen12_configure_oar_context(stream, NULL);
2604
2605	/* Make sure we disable noa to save power. */
2606	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2607}
2608
2609static void gen7_oa_enable(struct i915_perf_stream *stream)
2610{
2611	struct intel_uncore *uncore = stream->uncore;
2612	struct i915_gem_context *ctx = stream->ctx;
2613	u32 ctx_id = stream->specific_ctx_id;
2614	bool periodic = stream->periodic;
2615	u32 period_exponent = stream->period_exponent;
2616	u32 report_format = stream->oa_buffer.format;
2617
2618	/*
2619	 * Reset buf pointers so we don't forward reports from before now.
2620	 *
2621	 * Think carefully before trying to avoid this, since it
2622	 * also ensures status flags and the buffer itself are cleared
2623	 * in error paths, and we have checks for invalid reports based
2624	 * on the assumption that certain fields are written to zeroed
2625	 * memory, which this helps maintain.
2626	 */
2627	gen7_init_oa_buffer(stream);
2628
2629	intel_uncore_write(uncore, GEN7_OACONTROL,
2630			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
2631			   (period_exponent <<
2632			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
2633			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
2634			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
2635			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
2636			   GEN7_OACONTROL_ENABLE);
2637}
2638
2639static void gen8_oa_enable(struct i915_perf_stream *stream)
2640{
2641	struct intel_uncore *uncore = stream->uncore;
2642	u32 report_format = stream->oa_buffer.format;
2643
2644	/*
2645	 * Reset buf pointers so we don't forward reports from before now.
2646	 *
2647	 * Think carefully before trying to avoid this, since it
2648	 * also ensures status flags and the buffer itself are cleared
2649	 * in error paths, and we have checks for invalid reports based
2650	 * on the assumption that certain fields are written to zeroed
2651	 * memory, which this helps maintain.
2652	 */
2653	gen8_init_oa_buffer(stream);
2654
2655	/*
2656	 * Note: we don't rely on the hardware to perform single context
2657	 * filtering and instead filter on the CPU based on the context-id
2658	 * field of reports.
2659	 */
2660	intel_uncore_write(uncore, GEN8_OACONTROL,
2661			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
2662			   GEN8_OA_COUNTER_ENABLE);
2663}
2664
2665static void gen12_oa_enable(struct i915_perf_stream *stream)
2666{
2667	struct intel_uncore *uncore = stream->uncore;
2668	u32 report_format = stream->oa_buffer.format;
2669
2670	/*
2671	 * If we don't want OA reports from the OA buffer, then we don't even
2672	 * need to program the OAG unit.
2673	 */
2674	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
2675		return;
2676
2677	gen12_init_oa_buffer(stream);
2678
2679	intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
2680			   (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
2681			   GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
2682}
2683
2684/**
2685 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
2686 * @stream: An i915 perf stream opened for OA metrics
2687 *
2688 * [Re]enables hardware periodic sampling according to the period configured
2689 * when opening the stream. This also starts a hrtimer that will periodically
2690 * check for data in the circular OA buffer for notifying userspace (e.g.
2691 * during a read() or poll()).
2692 */
2693static void i915_oa_stream_enable(struct i915_perf_stream *stream)
2694{
2695	stream->pollin = false;
2696
2697	stream->perf->ops.oa_enable(stream);
2698
2699	if (stream->sample_flags & SAMPLE_OA_REPORT)
2700		hrtimer_start(&stream->poll_check_timer,
2701			      ns_to_ktime(stream->poll_oa_period),
2702			      HRTIMER_MODE_REL_PINNED);
2703}
2704
2705static void gen7_oa_disable(struct i915_perf_stream *stream)
2706{
2707	struct intel_uncore *uncore = stream->uncore;
2708
2709	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
2710	if (intel_wait_for_register(uncore,
2711				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
2712				    50))
2713		drm_err(&stream->perf->i915->drm,
2714			"wait for OA to be disabled timed out\n");
2715}
2716
2717static void gen8_oa_disable(struct i915_perf_stream *stream)
2718{
2719	struct intel_uncore *uncore = stream->uncore;
2720
2721	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
2722	if (intel_wait_for_register(uncore,
2723				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
2724				    50))
2725		drm_err(&stream->perf->i915->drm,
2726			"wait for OA to be disabled timed out\n");
2727}
2728
2729static void gen12_oa_disable(struct i915_perf_stream *stream)
2730{
2731	struct intel_uncore *uncore = stream->uncore;
2732
2733	intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
2734	if (intel_wait_for_register(uncore,
2735				    GEN12_OAG_OACONTROL,
2736				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
2737				    50))
2738		drm_err(&stream->perf->i915->drm,
2739			"wait for OA to be disabled timed out\n");
2740
2741	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
2742	if (intel_wait_for_register(uncore,
2743				    GEN12_OA_TLB_INV_CR,
2744				    1, 0,
2745				    50))
2746		drm_err(&stream->perf->i915->drm,
2747			"wait for OA tlb invalidate timed out\n");
2748}
2749
2750/**
2751 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
2752 * @stream: An i915 perf stream opened for OA metrics
2753 *
2754 * Stops the OA unit from periodically writing counter reports into the
2755 * circular OA buffer. This also stops the hrtimer that periodically checks for
2756 * data in the circular OA buffer, for notifying userspace.
2757 */
2758static void i915_oa_stream_disable(struct i915_perf_stream *stream)
2759{
2760	stream->perf->ops.oa_disable(stream);
2761
2762	if (stream->sample_flags & SAMPLE_OA_REPORT)
2763		hrtimer_cancel(&stream->poll_check_timer);
2764}
2765
2766static const struct i915_perf_stream_ops i915_oa_stream_ops = {
2767	.destroy = i915_oa_stream_destroy,
2768	.enable = i915_oa_stream_enable,
2769	.disable = i915_oa_stream_disable,
2770	.wait_unlocked = i915_oa_wait_unlocked,
2771	.poll_wait = i915_oa_poll_wait,
2772	.read = i915_oa_read,
2773};
2774
2775static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
2776{
2777	struct i915_active *active;
2778	int err;
2779
2780	active = i915_active_create();
2781	if (!active)
2782		return -ENOMEM;
2783
2784	err = stream->perf->ops.enable_metric_set(stream, active);
2785	if (err == 0)
2786		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);
2787
2788	i915_active_put(active);
2789	return err;
2790}
2791
2792static void
2793get_default_sseu_config(struct intel_sseu *out_sseu,
2794			struct intel_engine_cs *engine)
2795{
2796	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
2797
2798	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
2799
2800	if (GRAPHICS_VER(engine->i915) == 11) {
2801		/*
2802		 * We only need subslice count so it doesn't matter which ones
2803		 * we select - just turn off low bits in the amount of half of
2804		 * all available subslices per slice.
2805		 */
2806		out_sseu->subslice_mask =
2807			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
2808		out_sseu->slice_mask = 0x1;
2809	}
2810}
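
/*
 * Worked example (illustrative): with 8 subslices available,
 * hweight8(subslice_mask) == 8 and the expression evaluates to
 * ~(~0 << 4) == 0xf, i.e. half of the subslices (here the lowest
 * four) left enabled.
 */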
2811
2812static int
2813get_sseu_config(struct intel_sseu *out_sseu,
2814		struct intel_engine_cs *engine,
2815		const struct drm_i915_gem_context_param_sseu *drm_sseu)
2816{
2817	if (drm_sseu->engine.engine_class != engine->uabi_class ||
2818	    drm_sseu->engine.engine_instance != engine->uabi_instance)
2819		return -EINVAL;
2820
2821	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
2822}
2823
2824/**
2825 * i915_oa_stream_init - validate combined props for OA stream and init
2826 * @stream: An i915 perf stream
2827 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2828 * @props: The property state that configures stream (individually validated)
2829 *
2830 * While read_properties_unlocked() validates properties in isolation it
2831 * doesn't ensure that the combination necessarily makes sense.
2832 *
2833 * At this point it has been determined that userspace wants a stream of
2834 * OA metrics, but still we need to further validate the combined
2835 * properties are OK.
2836 *
2837 * If the configuration makes sense then we can allocate memory for
2838 * a circular OA buffer and apply the requested metric set configuration.
2839 *
2840 * Returns: zero on success or a negative error code.
2841 */
2842static int i915_oa_stream_init(struct i915_perf_stream *stream,
2843			       struct drm_i915_perf_open_param *param,
2844			       struct perf_open_properties *props)
2845{
2846	struct drm_i915_private *i915 = stream->perf->i915;
2847	struct i915_perf *perf = stream->perf;
2848	int format_size;
2849	int ret;
2850
2851	if (!props->engine) {
2852		DRM_DEBUG("OA engine not specified\n");
2853		return -EINVAL;
2854	}
2855
2856	/*
2857	 * If the sysfs metrics/ directory wasn't registered for some
2858	 * reason then don't let userspace try their luck with config
2859	 * IDs.
2860	 */
2861	if (!perf->metrics_kobj) {
2862		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
2863		return -EINVAL;
2864	}
2865
2866	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
2867	    (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
2868		DRM_DEBUG("Only OA report sampling supported\n");
2869		return -EINVAL;
2870	}
2871
2872	if (!perf->ops.enable_metric_set) {
2873		DRM_DEBUG("OA unit not supported\n");
2874		return -ENODEV;
2875	}
2876
2877	/*
2878	 * To avoid the complexity of having to accurately filter
2879	 * counter reports and marshal to the appropriate client
2880	 * we currently only allow exclusive access.
2881	 */
2882	if (perf->exclusive_stream) {
2883		DRM_DEBUG("OA unit already in use\n");
2884		return -EBUSY;
2885	}
2886
2887	if (!props->oa_format) {
2888		DRM_DEBUG("OA report format not specified\n");
2889		return -EINVAL;
2890	}
2891
2892	stream->engine = props->engine;
2893	stream->uncore = stream->engine->gt->uncore;
2894
2895	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
2896
2897	format_size = perf->oa_formats[props->oa_format].size;
2898
2899	stream->sample_flags = props->sample_flags;
2900	stream->sample_size += format_size;
2901
2902	stream->oa_buffer.format_size = format_size;
2903	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
2904		return -EINVAL;
2905
2906	stream->hold_preemption = props->hold_preemption;
2907
2908	stream->oa_buffer.format =
2909		perf->oa_formats[props->oa_format].format;
2910
2911	stream->periodic = props->oa_periodic;
2912	if (stream->periodic)
2913		stream->period_exponent = props->oa_period_exponent;
2914
2915	if (stream->ctx) {
2916		ret = oa_get_render_ctx_id(stream);
2917		if (ret) {
2918			DRM_DEBUG("Invalid context id to filter with\n");
2919			return ret;
2920		}
2921	}
2922
2923	ret = alloc_noa_wait(stream);
2924	if (ret) {
2925		DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
2926		goto err_noa_wait_alloc;
2927	}
2928
2929	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
2930	if (!stream->oa_config) {
2931		DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
2932		ret = -EINVAL;
2933		goto err_config;
2934	}
2935
2936	/* PRM - observability performance counters:
2937	 *
2938	 *   OACONTROL, performance counter enable, note:
2939	 *
2940	 *   "When this bit is set, in order to have coherent counts,
2941	 *   RC6 power state and trunk clock gating must be disabled.
2942	 *   This can be achieved by programming MMIO registers as
2943	 *   0xA094=0 and 0xA090[31]=1"
2944	 *
2945	 *   In our case we are expecting that taking pm + FORCEWAKE
2946	 *   references will effectively disable RC6.
2947	 */
2948	intel_engine_pm_get(stream->engine);
2949	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
2950
2951	ret = alloc_oa_buffer(stream);
2952	if (ret)
2953		goto err_oa_buf_alloc;
2954
2955	stream->ops = &i915_oa_stream_ops;
2956
2957	perf->sseu = props->sseu;
2958	WRITE_ONCE(perf->exclusive_stream, stream);
2959
2960	ret = i915_perf_stream_enable_sync(stream);
2961	if (ret) {
2962		DRM_DEBUG("Unable to enable metric set\n");
2963		goto err_enable;
2964	}
2965
2966	DRM_DEBUG("opening stream oa config uuid=%s\n",
2967		  stream->oa_config->uuid);
2968
2969	hrtimer_init(&stream->poll_check_timer,
2970		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2971	stream->poll_check_timer.function = oa_poll_check_timer_cb;
2972	init_waitqueue_head(&stream->poll_wq);
2973	spin_lock_init(&stream->oa_buffer.ptr_lock);
2974
2975	return 0;
2976
2977err_enable:
2978	WRITE_ONCE(perf->exclusive_stream, NULL);
2979	perf->ops.disable_metric_set(stream);
2980
2981	free_oa_buffer(stream);
2982
2983err_oa_buf_alloc:
2984	free_oa_configs(stream);
2985
2986	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
2987	intel_engine_pm_put(stream->engine);
2988
2989err_config:
2990	free_noa_wait(stream);
2991
2992err_noa_wait_alloc:
2993	if (stream->ctx)
2994		oa_put_render_ctx_id(stream);
2995
2996	return ret;
2997}
2998
2999void i915_oa_init_reg_state(const struct intel_context *ce,
3000			    const struct intel_engine_cs *engine)
3001{
3002	struct i915_perf_stream *stream;
3003
3004	if (engine->class != RENDER_CLASS)
3005		return;
3006
3007	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
3008	stream = READ_ONCE(engine->i915->perf.exclusive_stream);
3009	if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
3010		gen8_update_reg_state_unlocked(ce, stream);
3011}
3012
3013/**
3014 * i915_perf_read - handles read() FOP for i915 perf stream FDs
3015 * @file: An i915 perf stream file
3016 * @buf: destination buffer given by userspace
3017 * @count: the number of bytes userspace wants to read
3018 * @ppos: (inout) file seek position (unused)
3019 *
3020 * The entry point for handling a read() on a stream file descriptor from
3021 * userspace. Most of the work is left to i915_perf_read_locked() and
3022 * &i915_perf_stream_ops->read, but to save stream implementations (of which
3023 * we might have multiple later) from handling blocking reads, we do that here.
3024 *
3025 * We can also consistently treat trying to read from a disabled stream
3026 * as an IO error so implementations can assume the stream is enabled
3027 * while reading.
3028 *
3029 * Returns: The number of bytes copied or a negative error code on failure.
3030 */
3031static ssize_t i915_perf_read(struct file *file,
3032			      char __user *buf,
3033			      size_t count,
3034			      loff_t *ppos)
3035{
3036	struct i915_perf_stream *stream = file->private_data;
3037	struct i915_perf *perf = stream->perf;
3038	size_t offset = 0;
3039	int ret;
3040
3041	/* To ensure it's handled consistently we simply treat all reads of a
3042	 * disabled stream as an error. In particular it might otherwise lead
3043	 * to a deadlock for blocking file descriptors...
3044	 */
3045	if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
3046		return -EIO;
3047
3048	if (!(file->f_flags & O_NONBLOCK)) {
3049		/* There's the small chance of false positives from
3050		 * stream->ops->wait_unlocked.
3051		 *
3052	 * E.g. with single context filtering, since we only wait until
3053	 * the OA buffer has >= 1 report, we don't immediately know whether
3054	 * any reports really belong to the current context.
3055		 */
3056		do {
3057			ret = stream->ops->wait_unlocked(stream);
3058			if (ret)
3059				return ret;
3060
3061			mutex_lock(&perf->lock);
3062			ret = stream->ops->read(stream, buf, count, &offset);
3063			mutex_unlock(&perf->lock);
3064		} while (!offset && !ret);
3065	} else {
3066		mutex_lock(&perf->lock);
3067		ret = stream->ops->read(stream, buf, count, &offset);
3068		mutex_unlock(&perf->lock);
3069	}
3070
3071	/* We allow the poll checking to sometimes report false positive EPOLLIN
3072	 * events where we might actually report EAGAIN on read() if there's
3073	 * not really any data available. In this situation though we don't
3074	 * want to enter a busy loop between poll() reporting an EPOLLIN event
3075	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
3076	 * effectively ensures we back off until the next hrtimer callback
3077	 * before reporting another EPOLLIN event.
3078	 * The exception to this is if ops->read() returned -ENOSPC which means
3079	 * that more OA data is available than could fit in the user provided
3080	 * buffer. In this case we want the next poll() call to not block.
3081	 */
3082	if (ret != -ENOSPC)
3083		stream->pollin = false;
3084
3085	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
3086	return offset ?: (ret ?: -EAGAIN);
3087}
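
/*
 * A minimal userspace consumer sketch (illustrative; record layout as
 * in include/uapi/drm/i915_drm.h, error handling elided):
 *
 *   uint8_t buf[64 * 1024];
 *   ssize_t n = read(stream_fd, buf, sizeof(buf));
 *   const struct drm_i915_perf_record_header *hdr;
 *
 *   for (hdr = (const void *)buf;
 *        (const uint8_t *)hdr + sizeof(*hdr) <= buf + n;
 *        hdr = (const void *)((const uint8_t *)hdr + hdr->size)) {
 *           if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *                   process_report(hdr + 1);
 *   }
 *
 * where process_report() is a hypothetical callback handling one raw
 * OA report.
 */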
3088
3089static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3090{
3091	struct i915_perf_stream *stream =
3092		container_of(hrtimer, typeof(*stream), poll_check_timer);
3093
3094	if (oa_buffer_check_unlocked(stream)) {
3095		stream->pollin = true;
3096		wake_up(&stream->poll_wq);
3097	}
3098
3099	hrtimer_forward_now(hrtimer,
3100			    ns_to_ktime(stream->poll_oa_period));
3101
3102	return HRTIMER_RESTART;
3103}
3104
3105/**
3106 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3107 * @stream: An i915 perf stream
3108 * @file: An i915 perf stream file
3109 * @wait: poll() state table
3110 *
3111 * For handling userspace polling on an i915 perf stream, this calls through to
3112 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3113 * will be woken for new stream data.
3114 *
3115 * Note: The &perf->lock mutex has been taken to serialize
3116 * with any non-file-operation driver hooks.
3117 *
3118 * Returns: any poll events that are ready without sleeping
3119 */
3120static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3121				      struct file *file,
3122				      poll_table *wait)
3123{
3124	__poll_t events = 0;
3125
3126	stream->ops->poll_wait(stream, file, wait);
3127
3128	/* Note: we don't explicitly check whether there's something to read
3129	 * here since this path may be very hot depending on what else
3130	 * userspace is polling, or on the timeout in use. We rely solely on
3131	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3132	 * samples to read.
3133	 */
3134	if (stream->pollin)
3135		events |= EPOLLIN;
3136
3137	return events;
3138}
3139
3140/**
3141 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3142 * @file: An i915 perf stream file
3143 * @wait: poll() state table
3144 *
3145 * For handling userspace polling on an i915 perf stream, this ensures
3146 * poll_wait() gets called with a wait queue that will be woken for new stream
3147 * data.
3148 *
3149 * Note: Implementation deferred to i915_perf_poll_locked()
3150 *
3151 * Returns: any poll events that are ready without sleeping
3152 */
3153static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3154{
3155	struct i915_perf_stream *stream = file->private_data;
3156	struct i915_perf *perf = stream->perf;
3157	__poll_t ret;
3158
3159	mutex_lock(&perf->lock);
3160	ret = i915_perf_poll_locked(stream, file, wait);
3161	mutex_unlock(&perf->lock);
3162
3163	return ret;
3164}
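
/*
 * Userspace polling sketch (illustrative; assumes <poll.h>):
 *
 *   struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *   if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *           ... read() will now make progress, subject to the
 *               false-positive caveat described in i915_perf_read()
 */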
3165
3166/**
3167 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3168 * @stream: A disabled i915 perf stream
3169 *
3170 * [Re]enables the associated capture of data for this stream.
3171 *
3172 * If a stream was previously enabled then there's currently no intention
3173 * to provide userspace any guarantee about the preservation of previously
3174 * buffered data.
3175 */
3176static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3177{
3178	if (stream->enabled)
3179		return;
3180
3181	/* Allow stream->ops->enable() to refer to this */
3182	stream->enabled = true;
3183
3184	if (stream->ops->enable)
3185		stream->ops->enable(stream);
3186
3187	if (stream->hold_preemption)
3188		intel_context_set_nopreempt(stream->pinned_ctx);
3189}
3190
3191/**
3192 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3193 * @stream: An enabled i915 perf stream
3194 *
3195 * Disables the associated capture of data for this stream.
3196 *
3197 * The intention is that disabling and re-enabling a stream will ideally be
3198 * cheaper than destroying and re-opening a stream with the same configuration,
3199 * though there are no formal guarantees about what state or buffered data
3200 * must be retained between disabling and re-enabling a stream.
3201 *
3202 * Note: while a stream is disabled it's considered an error for userspace
3203 * to attempt to read from the stream (-EIO).
3204 */
3205static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3206{
3207	if (!stream->enabled)
3208		return;
3209
3210	/* Allow stream->ops->disable() to refer to this */
3211	stream->enabled = false;
3212
3213	if (stream->hold_preemption)
3214		intel_context_clear_nopreempt(stream->pinned_ctx);
3215
3216	if (stream->ops->disable)
3217		stream->ops->disable(stream);
3218}
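
/*
 * From userspace the pair of ioctls above is driven directly on the
 * stream fd; a hedged sketch, where run_other_work() is purely
 * illustrative:
 *
 *	#include <sys/ioctl.h>
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 *	run_other_work();		// uninstrumented phase
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *
 * noting that no guarantee is made that data buffered before the disable
 * survives the re-enable.
 */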
3219
3220static long i915_perf_config_locked(struct i915_perf_stream *stream,
3221				    unsigned long metrics_set)
3222{
3223	struct i915_oa_config *config;
3224	long ret = stream->oa_config->id;
3225
3226	config = i915_perf_get_oa_config(stream->perf, metrics_set);
3227	if (!config)
3228		return -EINVAL;
3229
3230	if (config != stream->oa_config) {
3231		int err;
3232
3233		/*
3234		 * If OA is bound to a specific context, emit the
3235		 * reconfiguration inline from that context. The update
3236		 * will then be ordered with respect to submission on that
3237		 * context.
3238		 *
3239		 * When set globally, we use a low priority kernel context,
3240		 * so it will effectively take effect when idle.
3241		 */
3242		err = emit_oa_config(stream, config, oa_context(stream), NULL);
3243		if (!err)
3244			config = xchg(&stream->oa_config, config);
3245		else
3246			ret = err;
3247	}
3248
3249	i915_oa_config_put(config);
3250
3251	return ret;
3252}
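
/*
 * Userspace can switch metric sets on a live stream with
 * I915_PERF_IOCTL_CONFIG (interface version 2+); a sketch, where
 * config_id is assumed to be an ID obtained from sysfs or from
 * DRM_IOCTL_I915_PERF_ADD_CONFIG:
 *
 *	long prev = ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, config_id);
 *
 * On success the return value is the ID of the configuration that was in
 * place before the call.
 */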
3253
3254/**
3255 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3256 * @stream: An i915 perf stream
3257 * @cmd: the ioctl request
3258 * @arg: the ioctl data
3259 *
3260 * Note: The &perf->lock mutex has been taken to serialize
3261 * with any non-file-operation driver hooks.
3262 *
3263 * Returns: zero on success or a negative error code. Returns -EINVAL for
3264 * an unknown ioctl request.
3265 */
3266static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3267				   unsigned int cmd,
3268				   unsigned long arg)
3269{
3270	switch (cmd) {
3271	case I915_PERF_IOCTL_ENABLE:
3272		i915_perf_enable_locked(stream);
3273		return 0;
3274	case I915_PERF_IOCTL_DISABLE:
3275		i915_perf_disable_locked(stream);
3276		return 0;
3277	case I915_PERF_IOCTL_CONFIG:
3278		return i915_perf_config_locked(stream, arg);
3279	}
3280
3281	return -EINVAL;
3282}
3283
3284/**
3285 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3286 * @file: An i915 perf stream file
3287 * @cmd: the ioctl request
3288 * @arg: the ioctl data
3289 *
3290 * Implementation deferred to i915_perf_ioctl_locked().
3291 *
3292 * Returns: zero on success or a negative error code. Returns -EINVAL for
3293 * an unknown ioctl request.
3294 */
3295static long i915_perf_ioctl(struct file *file,
3296			    unsigned int cmd,
3297			    unsigned long arg)
3298{
3299	struct i915_perf_stream *stream = file->private_data;
3300	struct i915_perf *perf = stream->perf;
3301	long ret;
3302
3303	mutex_lock(&perf->lock);
3304	ret = i915_perf_ioctl_locked(stream, cmd, arg);
3305	mutex_unlock(&perf->lock);
3306
3307	return ret;
3308}
3309
3310/**
3311 * i915_perf_destroy_locked - destroy an i915 perf stream
3312 * @stream: An i915 perf stream
3313 *
3314 * Frees all resources associated with the given i915 perf @stream, disabling
3315 * any associated data capture in the process.
3316 *
3317 * Note: The &perf->lock mutex has been taken to serialize
3318 * with any non-file-operation driver hooks.
3319 */
3320static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3321{
3322	if (stream->enabled)
3323		i915_perf_disable_locked(stream);
3324
3325	if (stream->ops->destroy)
3326		stream->ops->destroy(stream);
3327
3328	if (stream->ctx)
3329		i915_gem_context_put(stream->ctx);
3330
3331	kfree(stream);
3332}
3333
3334/**
3335 * i915_perf_release - handles userspace close() of a stream file
3336 * @inode: anonymous inode associated with file
3337 * @file: An i915 perf stream file
3338 *
3339 * Cleans up any resources associated with an open i915 perf stream file.
3340 *
3341 * NB: close() can't really fail from the userspace point of view.
3342 *
3343 * Returns: zero on success or a negative error code.
3344 */
3345static int i915_perf_release(struct inode *inode, struct file *file)
3346{
3347	struct i915_perf_stream *stream = file->private_data;
3348	struct i915_perf *perf = stream->perf;
3349
3350	mutex_lock(&perf->lock);
3351	i915_perf_destroy_locked(stream);
3352	mutex_unlock(&perf->lock);
3353
3354	/* Release the reference the perf stream kept on the driver. */
3355	drm_dev_put(&perf->i915->drm);
3356
3357	return 0;
3358}
3359
3360
3361static const struct file_operations fops = {
3362	.owner		= THIS_MODULE,
3363	.llseek		= no_llseek,
3364	.release	= i915_perf_release,
3365	.poll		= i915_perf_poll,
3366	.read		= i915_perf_read,
3367	.unlocked_ioctl	= i915_perf_ioctl,
3368	/* Our ioctls don't take pointer arguments, so it's safe to use the
3369	 * same function to handle 32bit compatibility.
3370	 */
3371	.compat_ioctl   = i915_perf_ioctl,
3372};
3373
3374
3375/**
3376 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3377 * @perf: i915 perf instance
3378 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3379 * @props: individually validated u64 property value pairs
3380 * @file: drm file
3381 *
3382 * See i915_perf_open_ioctl() for interface details.
3383 *
3384 * Implements further stream config validation and stream initialization on
3385 * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
3386 * taken to serialize with any non-file-operation driver hooks.
3387 *
3388 * Note: at this point the @props have only been validated in isolation and
3389 * it's still necessary to validate that the combination of properties makes
3390 * sense.
3391 *
3392 * In the case where userspace is interested in OA unit metrics then further
3393 * config validation and stream initialization details will be handled by
3394 * i915_oa_stream_init(). The code here should only validate config state that
3395 * will be relevant to all stream types / backends.
3396 *
3397 * Returns: zero on success or a negative error code.
3398 */
3399static int
3400i915_perf_open_ioctl_locked(struct i915_perf *perf,
3401			    struct drm_i915_perf_open_param *param,
3402			    struct perf_open_properties *props,
3403			    struct drm_file *file)
3404{
3405	struct i915_gem_context *specific_ctx = NULL;
3406	struct i915_perf_stream *stream = NULL;
3407	unsigned long f_flags = 0;
3408	bool privileged_op = true;
3409	int stream_fd;
3410	int ret;
3411
3412	if (props->single_context) {
3413		u32 ctx_handle = props->ctx_handle;
3414		struct drm_i915_file_private *file_priv = file->driver_priv;
3415
3416		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3417		if (!specific_ctx) {
3418			DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
3419				  ctx_handle);
3420			ret = -ENOENT;
3421			goto err;
3422		}
3423	}
3424
3425	/*
3426	 * On Haswell the OA unit supports clock gating off for a specific
3427	 * context and in this mode there's no visibility of metrics for the
3428	 * rest of the system, which we consider acceptable for a
3429	 * non-privileged client.
3430	 *
3431	 * For Gen8->11 the OA unit no longer supports clock gating off for a
3432	 * specific context and the kernel can't securely stop the counters
3433	 * from updating as system-wide / global values. Even though we can
3434	 * filter reports based on the included context ID we can't block
3435	 * clients from seeing the raw / global counter values via
3436	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3437	 * enable the OA unit by default.
3438	 *
3439	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3440	 * per context basis. So we can relax requirements there if the user
3441	 * doesn't request global stream access (i.e. query based sampling
3442	 * using MI_REPORT_PERF_COUNT).
3443	 */
3444	if (IS_HASWELL(perf->i915) && specific_ctx)
3445		privileged_op = false;
3446	else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
3447		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3448		privileged_op = false;
3449
3450	if (props->hold_preemption) {
3451		if (!props->single_context) {
3452			DRM_DEBUG("preemption disable with no context\n");
3453			ret = -EINVAL;
3454			goto err;
3455		}
3456		privileged_op = true;
3457	}
3458
3459	/*
3460	 * Asking for SSEU configuration is a privileged operation.
3461	 */
3462	if (props->has_sseu)
3463		privileged_op = true;
3464	else
3465		get_default_sseu_config(&props->sseu, props->engine);
3466
3467	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
3468	 * we check a dev.i915.perf_stream_paranoid sysctl option
3469	 * to determine if it's ok to access system wide OA counters
3470	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3471	 */
3472	if (privileged_op &&
3473	    i915_perf_stream_paranoid && !perfmon_capable()) {
3474		DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
3475		ret = -EACCES;
3476		goto err_ctx;
3477	}
3478
3479	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3480	if (!stream) {
3481		ret = -ENOMEM;
3482		goto err_ctx;
3483	}
3484
3485	stream->perf = perf;
3486	stream->ctx = specific_ctx;
3487	stream->poll_oa_period = props->poll_oa_period;
3488
3489	ret = i915_oa_stream_init(stream, param, props);
3490	if (ret)
3491		goto err_alloc;
3492
3493	/* We avoid simply assigning stream->sample_flags = props->sample_flags
3494	 * so that _stream_init can check the combination of sample flags more
3495	 * thoroughly; still, this is the expected result at this point.
3496	 */
3496	 */
3497	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3498		ret = -ENODEV;
3499		goto err_flags;
3500	}
3501
3502	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3503		f_flags |= O_CLOEXEC;
3504	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3505		f_flags |= O_NONBLOCK;
3506
3507	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3508	if (stream_fd < 0) {
3509		ret = stream_fd;
3510		goto err_flags;
3511	}
3512
3513	if (!(param->flags & I915_PERF_FLAG_DISABLED))
3514		i915_perf_enable_locked(stream);
3515
3516	/* Take a reference on the driver that will be kept with stream_fd
3517	 * until its release.
3518	 */
3519	drm_dev_get(&perf->i915->drm);
3520
3521	return stream_fd;
3522
3523err_flags:
3524	if (stream->ops->destroy)
3525		stream->ops->destroy(stream);
3526err_alloc:
3527	kfree(stream);
3528err_ctx:
3529	if (specific_ctx)
3530		i915_gem_context_put(specific_ctx);
3531err:
3532	return ret;
3533}
3534
3535static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3536{
3537	return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
3538					     2ULL << exponent);
3539}
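
/*
 * The period here is (2 << exponent) timestamp-clock ticks. As a worked
 * example, assuming HSW's 12.5 MHz timestamp clock (an 80ns tick),
 * exponent 0 gives 2 * 80ns = 160ns, matching the HSW figure quoted in
 * read_properties_unlocked() below, and each increment of the exponent
 * doubles the period.
 */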
3540
3541static __always_inline bool
3542oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
3543{
3544	return test_bit(format, perf->format_mask);
3545}
3546
3547static __always_inline void
3548oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
3549{
3550	__set_bit(format, perf->format_mask);
3551}
3552
3553/**
3554 * read_properties_unlocked - validate + copy userspace stream open properties
3555 * @perf: i915 perf instance
3556 * @uprops: The array of u64 key value pairs given by userspace
3557 * @n_props: The number of key value pairs expected in @uprops
3558 * @props: The stream configuration built up while validating properties
3559 *
3560 * Note this function only validates properties in isolation; it doesn't
3561 * validate that the combination of properties makes sense or that all
3562 * properties necessary for a particular kind of stream have been set.
3563 *
3564 * Note that there currently aren't any ordering requirements for properties so
3565 * we shouldn't validate or assume anything about ordering here. This doesn't
3566 * rule out defining new properties with ordering requirements in the future.
3567 */
3568static int read_properties_unlocked(struct i915_perf *perf,
3569				    u64 __user *uprops,
3570				    u32 n_props,
3571				    struct perf_open_properties *props)
3572{
3573	u64 __user *uprop = uprops;
3574	u32 i;
3575	int ret;
3576
3577	memset(props, 0, sizeof(struct perf_open_properties));
3578	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
3579
3580	if (!n_props) {
3581		DRM_DEBUG("No i915 perf properties given\n");
3582		return -EINVAL;
3583	}
3584
3585	/* At the moment we only support using i915-perf on the RCS. */
3586	props->engine = intel_engine_lookup_user(perf->i915,
3587						 I915_ENGINE_CLASS_RENDER,
3588						 0);
3589	if (!props->engine) {
3590		DRM_DEBUG("No RENDER-capable engines\n");
3591		return -EINVAL;
3592	}
3593
3594	/* Considering that ID = 0 is reserved and assuming that we don't
3595	 * (currently) expect any configurations to ever specify duplicate
3596	 * values for a particular property ID, the last _PROP_MAX value is
3597	 * one greater than the maximum number of properties we expect to get
3598	 * from userspace.
3599	 */
3600	if (n_props >= DRM_I915_PERF_PROP_MAX) {
3601		DRM_DEBUG("More i915 perf properties specified than exist\n");
3602		return -EINVAL;
3603	}
3604
3605	for (i = 0; i < n_props; i++) {
3606		u64 oa_period, oa_freq_hz;
3607		u64 id, value;
3608
3609		ret = get_user(id, uprop);
3610		if (ret)
3611			return ret;
3612
3613		ret = get_user(value, uprop + 1);
3614		if (ret)
3615			return ret;
3616
3617		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
3618			DRM_DEBUG("Unknown i915 perf property ID\n");
3619			return -EINVAL;
3620		}
3621
3622		switch ((enum drm_i915_perf_property_id)id) {
3623		case DRM_I915_PERF_PROP_CTX_HANDLE:
3624			props->single_context = 1;
3625			props->ctx_handle = value;
3626			break;
3627		case DRM_I915_PERF_PROP_SAMPLE_OA:
3628			if (value)
3629				props->sample_flags |= SAMPLE_OA_REPORT;
3630			break;
3631		case DRM_I915_PERF_PROP_OA_METRICS_SET:
3632			if (value == 0) {
3633				DRM_DEBUG("Unknown OA metric set ID\n");
3634				return -EINVAL;
3635			}
3636			props->metrics_set = value;
3637			break;
3638		case DRM_I915_PERF_PROP_OA_FORMAT:
3639			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
3640				DRM_DEBUG("Out-of-range OA report format %llu\n",
3641					  value);
3642				return -EINVAL;
3643			}
3644			if (!oa_format_valid(perf, value)) {
3645				DRM_DEBUG("Unsupported OA report format %llu\n",
3646					  value);
3647				return -EINVAL;
3648			}
3649			props->oa_format = value;
3650			break;
3651		case DRM_I915_PERF_PROP_OA_EXPONENT:
3652			if (value > OA_EXPONENT_MAX) {
3653				DRM_DEBUG("OA timer exponent too high (> %u)\n",
3654					 OA_EXPONENT_MAX);
3655				return -EINVAL;
3656			}
3657
3658			/* Theoretically we can program the OA unit to sample
3659			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
3660			 * for BXT. We don't allow such high sampling
3661			 * frequencies by default without perfmon privileges.
3662			 */
3663
3664			BUILD_BUG_ON(sizeof(oa_period) != 8);
3665			oa_period = oa_exponent_to_ns(perf, value);
3666
3667			/* This check is primarily to ensure that oa_period <=
3668			 * UINT32_MAX (before passing to do_div which only
3669			 * accepts a u32 denominator), but we can also skip
3670			 * checking anything < 1Hz which implicitly can't be
3671			 * limited via an integer oa_max_sample_rate.
3672			 */
3673			if (oa_period <= NSEC_PER_SEC) {
3674				u64 tmp = NSEC_PER_SEC;
3675				do_div(tmp, oa_period);
3676				oa_freq_hz = tmp;
3677			} else
3678				oa_freq_hz = 0;
3679
3680			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
3681				DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
3682					  i915_oa_max_sample_rate);
3683				return -EACCES;
3684			}
3685
3686			props->oa_periodic = true;
3687			props->oa_period_exponent = value;
3688			break;
3689		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
3690			props->hold_preemption = !!value;
3691			break;
3692		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
3693			struct drm_i915_gem_context_param_sseu user_sseu;
3694
3695			if (copy_from_user(&user_sseu,
3696					   u64_to_user_ptr(value),
3697					   sizeof(user_sseu))) {
3698				DRM_DEBUG("Unable to copy global sseu parameter\n");
3699				return -EFAULT;
3700			}
3701
3702			ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
3703			if (ret) {
3704				DRM_DEBUG("Invalid SSEU configuration\n");
3705				return ret;
3706			}
3707			props->has_sseu = true;
3708			break;
3709		}
3710		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
3711			if (value < 100000 /* 100us */) {
3712				DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
3713					  value);
3714				return -EINVAL;
3715			}
3716			props->poll_oa_period = value;
3717			break;
3718		case DRM_I915_PERF_PROP_MAX:
3719			MISSING_CASE(id);
3720			return -EINVAL;
3721		}
3722
3723		uprop += 2;
3724	}
3725
3726	return 0;
3727}
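
/*
 * The (key, value) layout validated above means userspace expresses its
 * request as a flat u64 array; a hedged sketch of a periodic,
 * system-wide OA configuration, where metrics_set_id is assumed to come
 * from sysfs or DRM_IOCTL_I915_PERF_ADD_CONFIG:
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 */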
3728
3729/**
3730 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
3731 * @dev: drm device
3732 * @data: ioctl data copied from userspace (unvalidated)
3733 * @file: drm file
3734 *
3735 * Validates the stream open parameters given by userspace including flags
3736 * and an array of u64 key, value pair properties.
3737 *
3738 * Very little is assumed up front about the nature of the stream being
3739 * opened (for instance we don't assume it's for periodic OA unit metrics). An
3740 * i915-perf stream is expected to be a suitable interface for other forms of
3741 * buffered data written by the GPU besides periodic OA metrics.
3742 *
3743 * Note we copy the properties from userspace outside of the i915 perf
3744 * mutex to avoid an awkward lockdep dependency with mmap_lock.
3745 *
3746 * Most of the implementation details are handled by
3747 * i915_perf_open_ioctl_locked() after taking the &perf->lock
3748 * mutex for serializing with any non-file-operation driver hooks.
3749 *
3750 * Return: A newly opened i915 perf stream file descriptor or negative
3751 * error code on failure.
3752 */
3753int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3754			 struct drm_file *file)
3755{
3756	struct i915_perf *perf = &to_i915(dev)->perf;
3757	struct drm_i915_perf_open_param *param = data;
3758	struct perf_open_properties props;
3759	u32 known_open_flags;
3760	int ret;
3761
3762	if (!perf->i915) {
3763		DRM_DEBUG("i915 perf interface not available for this system\n");
3764		return -ENOTSUPP;
3765	}
3766
3767	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
3768			   I915_PERF_FLAG_FD_NONBLOCK |
3769			   I915_PERF_FLAG_DISABLED;
3770	if (param->flags & ~known_open_flags) {
3771		DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
3772		return -EINVAL;
3773	}
3774
3775	ret = read_properties_unlocked(perf,
3776				       u64_to_user_ptr(param->properties_ptr),
3777				       param->num_properties,
3778				       &props);
3779	if (ret)
3780		return ret;
3781
3782	mutex_lock(&perf->lock);
3783	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
3784	mutex_unlock(&perf->lock);
3785
3786	return ret;
3787}
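
/*
 * Tying this together with the property array from the previous sketch,
 * a hedged example of the open call itself (includes and error handling
 * elided; drm_fd is an open i915 device node):
 *
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC |
 *			 I915_PERF_FLAG_FD_NONBLOCK,
 *		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */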
3788
3789/**
3790 * i915_perf_register - exposes i915-perf to userspace
3791 * @i915: i915 device instance
3792 *
3793 * In particular OA metric sets are advertised under a sysfs metrics/
3794 * directory allowing userspace to enumerate valid IDs that can be
3795 * used to open an i915-perf stream.
3796 */
3797void i915_perf_register(struct drm_i915_private *i915)
3798{
3799	struct i915_perf *perf = &i915->perf;
3800
3801	if (!perf->i915)
3802		return;
3803
3804	/* Take perf->lock to be sure we're synchronized with any attempted
3805	 * i915_perf_open_ioctl(), considering that we register after the
3806	 * drm device has already been exposed to userspace.
3807	 */
3808	mutex_lock(&perf->lock);
3809
3810	perf->metrics_kobj =
3811		kobject_create_and_add("metrics",
3812				       &i915->drm.primary->kdev->kobj);
3813
3814	mutex_unlock(&perf->lock);
3815}
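
/*
 * With the "metrics" kobject added under the primary drm device, each
 * advertised config is expected to appear as (path illustrative,
 * assuming card0):
 *
 *	/sys/class/drm/card0/metrics/<uuid>/id
 *
 * where reading "id" yields the value to pass as
 * DRM_I915_PERF_PROP_OA_METRICS_SET.
 */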
3816
3817/**
3818 * i915_perf_unregister - hide i915-perf from userspace
3819 * @i915: i915 device instance
3820 *
3821 * i915-perf state cleanup is split up into an 'unregister' and
3822 * 'deinit' phase where the interface is first hidden from
3823 * userspace by i915_perf_unregister() before cleaning up
3824 * remaining state in i915_perf_fini().
3825 */
3826void i915_perf_unregister(struct drm_i915_private *i915)
3827{
3828	struct i915_perf *perf = &i915->perf;
3829
3830	if (!perf->metrics_kobj)
3831		return;
3832
3833	kobject_put(perf->metrics_kobj);
3834	perf->metrics_kobj = NULL;
3835}
3836
3837static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
3838{
3839	static const i915_reg_t flex_eu_regs[] = {
3840		EU_PERF_CNTL0,
3841		EU_PERF_CNTL1,
3842		EU_PERF_CNTL2,
3843		EU_PERF_CNTL3,
3844		EU_PERF_CNTL4,
3845		EU_PERF_CNTL5,
3846		EU_PERF_CNTL6,
3847	};
3848	int i;
3849
3850	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
3851		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
3852			return true;
3853	}
3854	return false;
3855}
3856
3857#define ADDR_IN_RANGE(addr, start, end) \
3858	((addr) >= (start) && \
3859	 (addr) <= (end))
3860
3861#define REG_IN_RANGE(addr, start, end) \
3862	((addr) >= i915_mmio_reg_offset(start) && \
3863	 (addr) <= i915_mmio_reg_offset(end))
3864
3865#define REG_EQUAL(addr, mmio) \
3866	((addr) == i915_mmio_reg_offset(mmio))
3867
3868static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3869{
3870	return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
3871	       REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
3872	       REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
3873}
3874
3875static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3876{
3877	return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
3878	       REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
3879	       REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
3880	       REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
3881}
3882
3883static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3884{
3885	return gen7_is_valid_mux_addr(perf, addr) ||
3886	       REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
3887	       REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
3888}
3889
3890static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3891{
3892	return gen8_is_valid_mux_addr(perf, addr) ||
3893	       REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
3894	       REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
3895}
3896
3897static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3898{
3899	return gen7_is_valid_mux_addr(perf, addr) ||
3900	       ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
3901	       REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
3902	       REG_EQUAL(addr, HSW_MBVID2_MISR0);
3903}
3904
3905static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3906{
3907	return gen7_is_valid_mux_addr(perf, addr) ||
3908	       ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
3909}
3910
3911static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3912{
3913	return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
3914	       REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
3915	       REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
3916	       REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
3917	       REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
3918	       REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
3919	       REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
3920}
3921
3922static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3923{
3924	return REG_EQUAL(addr, NOA_WRITE) ||
3925	       REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
3926	       REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
3927	       REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
3928	       REG_EQUAL(addr, RPM_CONFIG0) ||
3929	       REG_EQUAL(addr, RPM_CONFIG1) ||
3930	       REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
3931}
3932
3933static u32 mask_reg_value(u32 reg, u32 val)
3934{
3935	/* HALF_SLICE_CHICKEN2 is programmed with the
3936	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
3937	 * programmed by userspace doesn't change this.
3938	 */
3939	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
3940		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
3941
3942	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
3943	 * indicated by its name and a bunch of selection fields used by OA
3944	 * configs.
3945	 */
3946	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
3947		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
3948
3949	return val;
3950}
3951
3952static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
3953					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
3954					 u32 __user *regs,
3955					 u32 n_regs)
3956{
3957	struct i915_oa_reg *oa_regs;
3958	int err;
3959	u32 i;
3960
3961	if (!n_regs)
3962		return NULL;
3963
3964	/* No is_valid function means we're not allowing any register to be programmed. */
3965	GEM_BUG_ON(!is_valid);
3966	if (!is_valid)
3967		return ERR_PTR(-EINVAL);
3968
3969	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
3970	if (!oa_regs)
3971		return ERR_PTR(-ENOMEM);
3972
3973	for (i = 0; i < n_regs; i++) {
3974		u32 addr, value;
3975
3976		err = get_user(addr, regs);
3977		if (err)
3978			goto addr_err;
3979
3980		if (!is_valid(perf, addr)) {
3981			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
3982			err = -EINVAL;
3983			goto addr_err;
3984		}
3985
3986		err = get_user(value, regs + 1);
3987		if (err)
3988			goto addr_err;
3989
3990		oa_regs[i].addr = _MMIO(addr);
3991		oa_regs[i].value = mask_reg_value(addr, value);
3992
3993		regs += 2;
3994	}
3995
3996	return oa_regs;
3997
3998addr_err:
3999	kfree(oa_regs);
4000	return ERR_PTR(err);
4001}
4002
4003static ssize_t show_dynamic_id(struct device *dev,
4004			       struct device_attribute *attr,
4005			       char *buf)
4006{
4007	struct i915_oa_config *oa_config =
4008		container_of(attr, typeof(*oa_config), sysfs_metric_id);
4009
4010	return sprintf(buf, "%d\n", oa_config->id);
4011}
4012
4013static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
4014					 struct i915_oa_config *oa_config)
4015{
4016	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
4017	oa_config->sysfs_metric_id.attr.name = "id";
4018	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
4019	oa_config->sysfs_metric_id.show = show_dynamic_id;
4020	oa_config->sysfs_metric_id.store = NULL;
4021
4022	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
4023	oa_config->attrs[1] = NULL;
4024
4025	oa_config->sysfs_metric.name = oa_config->uuid;
4026	oa_config->sysfs_metric.attrs = oa_config->attrs;
4027
4028	return sysfs_create_group(perf->metrics_kobj,
4029				  &oa_config->sysfs_metric);
4030}
4031
4032/**
4033 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4034 * @dev: drm device
4035 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4036 *        userspace (unvalidated)
4037 * @file: drm file
4038 *
4039 * Validates the submitted OA registers to be saved into a new OA config that
4040 * can then be used for programming the OA unit and its NOA network.
4041 *
4042 * Returns: A newly allocated config number to be used with the perf open ioctl
4043 * or a negative error code on failure.
4044 */
4045int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4046			       struct drm_file *file)
4047{
4048	struct i915_perf *perf = &to_i915(dev)->perf;
4049	struct drm_i915_perf_oa_config *args = data;
4050	struct i915_oa_config *oa_config, *tmp;
4051	struct i915_oa_reg *regs;
4052	int err, id;
4053
4054	if (!perf->i915) {
4055		DRM_DEBUG("i915 perf interface not available for this system\n");
4056		return -ENOTSUPP;
4057	}
4058
4059	if (!perf->metrics_kobj) {
4060		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
4061		return -EINVAL;
4062	}
4063
4064	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4065		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
4066		return -EACCES;
4067	}
4068
4069	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4070	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4071	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
4072		DRM_DEBUG("No OA registers given\n");
4073		return -EINVAL;
4074	}
4075
4076	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4077	if (!oa_config) {
4078		DRM_DEBUG("Failed to allocate memory for the OA config\n");
4079		return -ENOMEM;
4080	}
4081
4082	oa_config->perf = perf;
4083	kref_init(&oa_config->ref);
4084
4085	if (!uuid_is_valid(args->uuid)) {
4086		DRM_DEBUG("Invalid uuid format for OA config\n");
4087		err = -EINVAL;
4088		goto reg_err;
4089	}
4090
4091	/* Last character in oa_config->uuid will be 0 because oa_config was
4092	 * allocated with kzalloc().
4093	 */
4094	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4095
4096	oa_config->mux_regs_len = args->n_mux_regs;
4097	regs = alloc_oa_regs(perf,
4098			     perf->ops.is_valid_mux_reg,
4099			     u64_to_user_ptr(args->mux_regs_ptr),
4100			     args->n_mux_regs);
4101
4102	if (IS_ERR(regs)) {
4103		DRM_DEBUG("Failed to create OA config for mux_regs\n");
4104		err = PTR_ERR(regs);
4105		goto reg_err;
4106	}
4107	oa_config->mux_regs = regs;
4108
4109	oa_config->b_counter_regs_len = args->n_boolean_regs;
4110	regs = alloc_oa_regs(perf,
4111			     perf->ops.is_valid_b_counter_reg,
4112			     u64_to_user_ptr(args->boolean_regs_ptr),
4113			     args->n_boolean_regs);
4114
4115	if (IS_ERR(regs)) {
4116		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
4117		err = PTR_ERR(regs);
4118		goto reg_err;
4119	}
4120	oa_config->b_counter_regs = regs;
4121
4122	if (GRAPHICS_VER(perf->i915) < 8) {
4123		if (args->n_flex_regs != 0) {
4124			err = -EINVAL;
4125			goto reg_err;
4126		}
4127	} else {
4128		oa_config->flex_regs_len = args->n_flex_regs;
4129		regs = alloc_oa_regs(perf,
4130				     perf->ops.is_valid_flex_reg,
4131				     u64_to_user_ptr(args->flex_regs_ptr),
4132				     args->n_flex_regs);
4133
4134		if (IS_ERR(regs)) {
4135			DRM_DEBUG("Failed to create OA config for flex_regs\n");
4136			err = PTR_ERR(regs);
4137			goto reg_err;
4138		}
4139		oa_config->flex_regs = regs;
4140	}
4141
4142	err = mutex_lock_interruptible(&perf->metrics_lock);
4143	if (err)
4144		goto reg_err;
4145
4146	/* We shouldn't have too many configs, so this iteration shouldn't be
4147	 * too costly.
4148	 */
4149	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4150		if (!strcmp(tmp->uuid, oa_config->uuid)) {
4151			DRM_DEBUG("OA config already exists with this uuid\n");
4152			err = -EADDRINUSE;
4153			goto sysfs_err;
4154		}
4155	}
4156
4157	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4158	if (err) {
4159		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
4160		goto sysfs_err;
4161	}
4162
4163	/* Config id 0 is invalid; id 1 is reserved for the kernel test config. */
4164	oa_config->id = idr_alloc(&perf->metrics_idr,
4165				  oa_config, 2,
4166				  0, GFP_KERNEL);
4167	if (oa_config->id < 0) {
4168		DRM_DEBUG("Failed to allocate an ID for OA config\n");
4169		err = oa_config->id;
4170		goto sysfs_err;
4171	}
4172
4173	mutex_unlock(&perf->metrics_lock);
4174
4175	DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4176
4177	return oa_config->id;
4178
4179sysfs_err:
4180	mutex_unlock(&perf->metrics_lock);
4181reg_err:
4182	i915_oa_config_put(oa_config);
4183	DRM_DEBUG("Failed to add new OA config\n");
4184	return err;
4185}
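
/*
 * A hedged userspace sketch of adding a config through the ioctl above;
 * the register address/value pairs are illustrative only and must pass
 * the per-platform is_valid checks:
 *
 *	#include <string.h>
 *
 *	uint32_t mux_regs[] = { 0x9888, 0x00000000 };
 *	struct drm_i915_perf_oa_config config = {
 *		.n_mux_regs = 1,
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	memcpy(config.uuid, "01234567-0123-0123-0123-0123456789ab",
 *	       sizeof(config.uuid));
 *	int config_id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 */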
4186
4187/**
4188 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4189 * @dev: drm device
4190 * @data: ioctl data (pointer to u64 integer) copied from userspace
4191 * @file: drm file
4192 *
4193 * Configs can be removed while being used; they will stop appearing in sysfs
4194 * and their content will be freed when the stream using the config is closed.
4195 *
4196 * Returns: 0 on success or a negative error code on failure.
4197 */
4198int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4199				  struct drm_file *file)
4200{
4201	struct i915_perf *perf = &to_i915(dev)->perf;
4202	u64 *arg = data;
4203	struct i915_oa_config *oa_config;
4204	int ret;
4205
4206	if (!perf->i915) {
4207		DRM_DEBUG("i915 perf interface not available for this system\n");
4208		return -ENOTSUPP;
4209	}
4210
4211	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4212		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
4213		return -EACCES;
4214	}
4215
4216	ret = mutex_lock_interruptible(&perf->metrics_lock);
4217	if (ret)
4218		return ret;
4219
4220	oa_config = idr_find(&perf->metrics_idr, *arg);
4221	if (!oa_config) {
4222		DRM_DEBUG("Failed to remove unknown OA config\n");
4223		ret = -ENOENT;
4224		goto err_unlock;
4225	}
4226
4227	GEM_BUG_ON(*arg != oa_config->id);
4228
4229	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4230
4231	idr_remove(&perf->metrics_idr, *arg);
4232
4233	mutex_unlock(&perf->metrics_lock);
4234
4235	DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4236
4237	i915_oa_config_put(oa_config);
4238
4239	return 0;
4240
4241err_unlock:
4242	mutex_unlock(&perf->metrics_lock);
4243	return ret;
4244}
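
/*
 * Removal is symmetric and takes the config ID by pointer; a sketch,
 * reusing config_id from the previous example:
 *
 *	uint64_t id = config_id;
 *	ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &id);
 */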
4245
4246static struct ctl_table oa_table[] = {
4247	{
4248	 .procname = "perf_stream_paranoid",
4249	 .data = &i915_perf_stream_paranoid,
4250	 .maxlen = sizeof(i915_perf_stream_paranoid),
4251	 .mode = 0644,
4252	 .proc_handler = proc_dointvec_minmax,
4253	 .extra1 = SYSCTL_ZERO,
4254	 .extra2 = SYSCTL_ONE,
4255	 },
4256	{
4257	 .procname = "oa_max_sample_rate",
4258	 .data = &i915_oa_max_sample_rate,
4259	 .maxlen = sizeof(i915_oa_max_sample_rate),
4260	 .mode = 0644,
4261	 .proc_handler = proc_dointvec_minmax,
4262	 .extra1 = SYSCTL_ZERO,
4263	 .extra2 = &oa_sample_rate_hard_limit,
4264	 },
4265	{}
4266};
4267
4268static struct ctl_table i915_root[] = {
4269	{
4270	 .procname = "i915",
4271	 .maxlen = 0,
4272	 .mode = 0555,
4273	 .child = oa_table,
4274	 },
4275	{}
4276};
4277
4278static struct ctl_table dev_root[] = {
4279	{
4280	 .procname = "dev",
4281	 .maxlen = 0,
4282	 .mode = 0555,
4283	 .child = i915_root,
4284	 },
4285	{}
4286};
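
/*
 * These tables surface the two knobs referenced throughout this file,
 * e.g. (shell, as root; values illustrative):
 *
 *	# sysctl dev.i915.perf_stream_paranoid=0
 *	# sysctl dev.i915.oa_max_sample_rate=100000
 *
 * with oa_max_sample_rate clamped by oa_sample_rate_hard_limit.
 */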
4287
4288static void oa_init_supported_formats(struct i915_perf *perf)
4289{
4290	struct drm_i915_private *i915 = perf->i915;
4291	enum intel_platform platform = INTEL_INFO(i915)->platform;
4292
4293	switch (platform) {
4294	case INTEL_HASWELL:
4295		oa_format_add(perf, I915_OA_FORMAT_A13);
4297		oa_format_add(perf, I915_OA_FORMAT_A29);
4298		oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
4299		oa_format_add(perf, I915_OA_FORMAT_B4_C8);
4300		oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
4301		oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
4302		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4303		break;
4304
4305	case INTEL_BROADWELL:
4306	case INTEL_CHERRYVIEW:
4307	case INTEL_SKYLAKE:
4308	case INTEL_BROXTON:
4309	case INTEL_KABYLAKE:
4310	case INTEL_GEMINILAKE:
4311	case INTEL_COFFEELAKE:
4312	case INTEL_COMETLAKE:
4313	case INTEL_CANNONLAKE:
4314	case INTEL_ICELAKE:
4315	case INTEL_ELKHARTLAKE:
4316	case INTEL_JASPERLAKE:
4317	case INTEL_TIGERLAKE:
4318	case INTEL_ROCKETLAKE:
4319	case INTEL_DG1:
4320	case INTEL_ALDERLAKE_S:
4321	case INTEL_ALDERLAKE_P:
4322		oa_format_add(perf, I915_OA_FORMAT_A12);
4323		oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
4324		oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
4325		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4326		break;
4327
4328	default:
4329		MISSING_CASE(platform);
4330	}
4331}
4332
4333/**
4334 * i915_perf_init - initialize i915-perf state on module bind
4335 * @i915: i915 device instance
4336 *
4337 * Initializes i915-perf state without exposing anything to userspace.
4338 *
4339 * Note: i915-perf initialization is split into an 'init' and 'register'
4340 * phase with the i915_perf_register() exposing state to userspace.
4341 */
4342void i915_perf_init(struct drm_i915_private *i915)
4343{
4344	struct i915_perf *perf = &i915->perf;
4345
4346	/* XXX const struct i915_perf_ops! */
4347
4348	perf->oa_formats = oa_formats;
4349	if (IS_HASWELL(i915)) {
4350		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
4351		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
4352		perf->ops.is_valid_flex_reg = NULL;
4353		perf->ops.enable_metric_set = hsw_enable_metric_set;
4354		perf->ops.disable_metric_set = hsw_disable_metric_set;
4355		perf->ops.oa_enable = gen7_oa_enable;
4356		perf->ops.oa_disable = gen7_oa_disable;
4357		perf->ops.read = gen7_oa_read;
4358		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
4359	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
4360		/* Note that although we could theoretically also support the
4361		 * legacy ringbuffer mode on BDW (and earlier iterations of
4362		 * this driver did, before upstreaming), it didn't seem
4363		 * worth the complexity to maintain now that BDW+ enables
4364		 * execlist mode by default.
4365		 */
4366		perf->ops.read = gen8_oa_read;
4367
4368		if (IS_GRAPHICS_VER(i915, 8, 9)) {
4369			perf->ops.is_valid_b_counter_reg =
4370				gen7_is_valid_b_counter_addr;
4371			perf->ops.is_valid_mux_reg =
4372				gen8_is_valid_mux_addr;
4373			perf->ops.is_valid_flex_reg =
4374				gen8_is_valid_flex_addr;
4375
4376			if (IS_CHERRYVIEW(i915)) {
4377				perf->ops.is_valid_mux_reg =
4378					chv_is_valid_mux_addr;
4379			}
4380
4381			perf->ops.oa_enable = gen8_oa_enable;
4382			perf->ops.oa_disable = gen8_oa_disable;
4383			perf->ops.enable_metric_set = gen8_enable_metric_set;
4384			perf->ops.disable_metric_set = gen8_disable_metric_set;
4385			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4386
4387			if (GRAPHICS_VER(i915) == 8) {
4388				perf->ctx_oactxctrl_offset = 0x120;
4389				perf->ctx_flexeu0_offset = 0x2ce;
4390
4391				perf->gen8_valid_ctx_bit = BIT(25);
4392			} else {
4393				perf->ctx_oactxctrl_offset = 0x128;
4394				perf->ctx_flexeu0_offset = 0x3de;
4395
4396				perf->gen8_valid_ctx_bit = BIT(16);
4397			}
4398		} else if (IS_GRAPHICS_VER(i915, 10, 11)) {
4399			perf->ops.is_valid_b_counter_reg =
4400				gen7_is_valid_b_counter_addr;
4401			perf->ops.is_valid_mux_reg =
4402				gen10_is_valid_mux_addr;
4403			perf->ops.is_valid_flex_reg =
4404				gen8_is_valid_flex_addr;
4405
4406			perf->ops.oa_enable = gen8_oa_enable;
4407			perf->ops.oa_disable = gen8_oa_disable;
4408			perf->ops.enable_metric_set = gen8_enable_metric_set;
4409			perf->ops.disable_metric_set = gen10_disable_metric_set;
4410			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4411
4412			if (GRAPHICS_VER(i915) == 10) {
4413				perf->ctx_oactxctrl_offset = 0x128;
4414				perf->ctx_flexeu0_offset = 0x3de;
4415			} else {
4416				perf->ctx_oactxctrl_offset = 0x124;
4417				perf->ctx_flexeu0_offset = 0x78e;
4418			}
4419			perf->gen8_valid_ctx_bit = BIT(16);
4420		} else if (GRAPHICS_VER(i915) == 12) {
4421			perf->ops.is_valid_b_counter_reg =
4422				gen12_is_valid_b_counter_addr;
4423			perf->ops.is_valid_mux_reg =
4424				gen12_is_valid_mux_addr;
4425			perf->ops.is_valid_flex_reg =
4426				gen8_is_valid_flex_addr;
4427
4428			perf->ops.oa_enable = gen12_oa_enable;
4429			perf->ops.oa_disable = gen12_oa_disable;
4430			perf->ops.enable_metric_set = gen12_enable_metric_set;
4431			perf->ops.disable_metric_set = gen12_disable_metric_set;
4432			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
4433
4434			perf->ctx_flexeu0_offset = 0;
4435			perf->ctx_oactxctrl_offset = 0x144;
4436		}
4437	}
4438
4439	if (perf->ops.enable_metric_set) {
4440		mutex_init(&perf->lock);
4441
4442		/* Choose a representative limit */
4443		oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2;
4444
4445		mutex_init(&perf->metrics_lock);
4446		idr_init_base(&perf->metrics_idr, 1);
4447
4448		/* We set up some ratelimit state to potentially throttle any
4449		 * _NOTES about spurious, invalid OA reports which we don't
4450		 * forward to userspace.
4451		 *
4452		 * We print a _NOTE about any throttling when closing the
4453		 * stream instead of waiting until driver _fini which no one
4454		 * would ever see.
4455		 *
4456		 * Using the same limiting factors as printk_ratelimit()
4457		 */
4458		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
4459		/* Since we use a DRM_NOTE for spurious reports it would be
4460		 * inconsistent to let __ratelimit() automatically print a
4461		 * warning for throttling.
4462		 */
4463		ratelimit_set_flags(&perf->spurious_report_rs,
4464				    RATELIMIT_MSG_ON_RELEASE);
4465
4466		ratelimit_state_init(&perf->tail_pointer_race,
4467				     5 * HZ, 10);
4468		ratelimit_set_flags(&perf->tail_pointer_race,
4469				    RATELIMIT_MSG_ON_RELEASE);
4470
4471		atomic64_set(&perf->noa_programming_delay,
4472			     500 * 1000 /* 500us */);
4473
4474		perf->i915 = i915;
4475
4476		oa_init_supported_formats(perf);
4477	}
4478}
4479
4480static int destroy_config(int id, void *p, void *data)
4481{
4482	i915_oa_config_put(p);
4483	return 0;
4484}
4485
4486void i915_perf_sysctl_register(void)
4487{
4488	sysctl_header = register_sysctl_table(dev_root);
4489}
4490
4491void i915_perf_sysctl_unregister(void)
4492{
4493	unregister_sysctl_table(sysctl_header);
4494}
4495
4496/**
4497 * i915_perf_fini - Counter part to i915_perf_init()
4498 * @i915: i915 device instance
4499 */
4500void i915_perf_fini(struct drm_i915_private *i915)
4501{
4502	struct i915_perf *perf = &i915->perf;
4503
4504	if (!perf->i915)
4505		return;
4506
4507	idr_for_each(&perf->metrics_idr, destroy_config, perf);
4508	idr_destroy(&perf->metrics_idr);
4509
4510	memset(&perf->ops, 0, sizeof(perf->ops));
4511	perf->i915 = NULL;
4512}
4513
4514/**
4515 * i915_perf_ioctl_version - Version of the i915-perf subsystem
4516 *
4517 * This version number is used by userspace to detect available features.
4518 */
4519int i915_perf_ioctl_version(void)
4520{
4521	/*
4522	 * 1: Initial version
4523	 *   I915_PERF_IOCTL_ENABLE
4524	 *   I915_PERF_IOCTL_DISABLE
4525	 *
4526	 * 2: Added runtime modification of OA config.
4527	 *   I915_PERF_IOCTL_CONFIG
4528	 *
4529	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
4530	 *    preemption on a particular context so that performance data is
4531	 *    accessible from a delta of MI_RPC reports without looking at the
4532	 *    OA buffer.
4533	 *
4534	 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
4535	 *    be run for the duration of the performance recording based on
4536	 *    their SSEU configuration.
4537	 *
4538	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
4539	 *    interval for the hrtimer used to check for OA data.
4540	 */
4541	return 5;
4542}
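
/*
 * Userspace is expected to query this revision before relying on newer
 * features; a hedged sketch using the getparam ioctl:
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &version,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *	int has_hold_preemption = version >= 3;
 */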
4543
4544#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4545#include "selftests/i915_perf.c"
4546#endif