1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_
3
4#include <linux/hashtable.h>
5#include "i915_gem_batch_pool.h"
6
7#define I915_CMD_HASH_ORDER 9
8
9/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
10 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
11 * to give some indication as to some of the magic values used in the various
12 * workarounds!
13 */
14#define CACHELINE_BYTES 64
15#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
16
17/*
18 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
19 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
20 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
21 *
22 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
23 * cacheline, the Head Pointer must not be greater than the Tail
24 * Pointer."
25 */
26#define I915_RING_FREE_SPACE 64
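/*
 * Illustrative sketch only (hypothetical helper, not part of this header's
 * API): one way the rule above is honoured is to report the ring's free
 * space minus I915_RING_FREE_SPACE, so the tail is never advanced into the
 * same cacheline as the head.
 */
static inline int example_ring_free_space(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;

	return space - I915_RING_FREE_SPACE;
}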
27
28struct intel_hw_status_page {
29 u32 *page_addr;
30 unsigned int gfx_addr;
31 struct drm_i915_gem_object *obj;
32};
33
34#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
35#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
36
37#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
38#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
39
40#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
41#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
42
43#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
44#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
45
46#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
47#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
48
49#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
50#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
51
52/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
53 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
54 */
55#define i915_semaphore_seqno_size sizeof(uint64_t)
56#define GEN8_SIGNAL_OFFSET(__ring, to) \
57 (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
58 ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
59 (i915_semaphore_seqno_size * (to)))
60
61#define GEN8_WAIT_OFFSET(__ring, from) \
62 (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
63 ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
64 (i915_semaphore_seqno_size * (__ring)->id))
65
66#define GEN8_RING_SEMAPHORE_INIT do { \
67 if (!dev_priv->semaphore_obj) { \
68 break; \
69 } \
70 ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
71 ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
72 ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
73 ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
74 ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
75 ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
76 } while (0)
77
78enum intel_ring_hangcheck_action {
79 HANGCHECK_IDLE = 0,
80 HANGCHECK_WAIT,
81 HANGCHECK_ACTIVE,
82 HANGCHECK_ACTIVE_LOOP,
83 HANGCHECK_KICK,
84 HANGCHECK_HUNG,
85};
86
87#define HANGCHECK_SCORE_RING_HUNG 31
88
89struct intel_ring_hangcheck {
90 u64 acthd;
91 u64 max_acthd;
92 u32 seqno;
93 int score;
94 enum intel_ring_hangcheck_action action;
95 int deadlock;
96 u32 instdone[I915_NUM_INSTDONE_REG];
97};
98
99struct intel_ringbuffer {
100 struct drm_i915_gem_object *obj;
101 void __iomem *virtual_start;
102 struct i915_vma *vma;
103
104 struct intel_engine_cs *ring;
105 struct list_head link;
106
107 u32 head;
108 u32 tail;
109 int space;
110 int size;
111 int effective_size;
112 int reserved_size;
113 int reserved_tail;
114 bool reserved_in_use;
115
116 /** We track the position of the requests in the ring buffer, and
117 * when each is retired we increment last_retired_head as the GPU
118 * must have finished processing the request and so we know we
119 * can advance the ringbuffer up to that position.
120 *
121 * last_retired_head is set to -1 after the value is consumed so
122 * we can detect new retirements.
123 */
124 u32 last_retired_head;
125};
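/*
 * Illustrative sketch only (hypothetical helper): how the bookkeeping above
 * is typically consumed. Once a request retires, last_retired_head tells us
 * how far the GPU must have progressed, so head can be advanced and the
 * sentinel reset to -1 until the next retirement.
 */
static inline void example_consume_last_retired_head(struct intel_ringbuffer *ringbuf)
{
	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;
	}
}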
126
127struct intel_context;
128struct drm_i915_reg_descriptor;
129
130/*
131 * we use a single page to load ctx workarounds so all of these
132 * values are referred to in terms of dwords
133 *
134 * struct i915_wa_ctx_bb:
135 * offset: specifies the batch starting position; also helpful in case
136 * we want to have multiple batches at different offsets based on
137 * some criteria. It is not a requirement at the moment but provides
138 * an option for future use.
139 * size: size of the batch in DWORDS
140 */
141struct i915_ctx_workarounds {
142 struct i915_wa_ctx_bb {
143 u32 offset;
144 u32 size;
145 } indirect_ctx, per_ctx;
146 struct drm_i915_gem_object *obj;
147};
148
149struct intel_engine_cs {
150 const char *name;
151 enum intel_ring_id {
152 RCS = 0,
153 BCS,
154 VCS,
155 VCS2, /* Keep instances of the same type engine together. */
156 VECS
157 } id;
158#define I915_NUM_RINGS 5
159#define _VCS(n) (VCS + (n))
160 unsigned int exec_id;
161 unsigned int guc_id;
162 u32 mmio_base;
163 struct drm_device *dev;
164 struct intel_ringbuffer *buffer;
165 struct list_head buffers;
166
167 /*
168 * A pool of objects to use as shadow copies of client batch buffers
169 * when the command parser is enabled. Prevents the client from
170 * modifying the batch contents after software parsing.
171 */
172 struct i915_gem_batch_pool batch_pool;
173
174 struct intel_hw_status_page status_page;
175 struct i915_ctx_workarounds wa_ctx;
176
177 unsigned irq_refcount; /* protected by dev_priv->irq_lock */
178 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
179 struct drm_i915_gem_request *trace_irq_req;
180 bool __must_check (*irq_get)(struct intel_engine_cs *ring);
181 void (*irq_put)(struct intel_engine_cs *ring);
182
183 int (*init_hw)(struct intel_engine_cs *ring);
184
185 int (*init_context)(struct drm_i915_gem_request *req);
186
187 void (*write_tail)(struct intel_engine_cs *ring,
188 u32 value);
189 int __must_check (*flush)(struct drm_i915_gem_request *req,
190 u32 invalidate_domains,
191 u32 flush_domains);
192 int (*add_request)(struct drm_i915_gem_request *req);
193 /* Some chipsets are not quite as coherent as advertised and need
194 * an expensive kick to force a true read of the up-to-date seqno.
195 * However, the up-to-date seqno is not always required and the last
196 * seen value is good enough. Note that the seqno will always be
197 * monotonic, even if not coherent.
198 */
199 u32 (*get_seqno)(struct intel_engine_cs *ring,
200 bool lazy_coherency);
201 void (*set_seqno)(struct intel_engine_cs *ring,
202 u32 seqno);
203 int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
204 u64 offset, u32 length,
205 unsigned dispatch_flags);
206#define I915_DISPATCH_SECURE 0x1
207#define I915_DISPATCH_PINNED 0x2
208#define I915_DISPATCH_RS 0x4
209 void (*cleanup)(struct intel_engine_cs *ring);
210
211 /* GEN8 signal/wait table - never trust comments!
212 * signal to signal to signal to signal to signal to
213 * RCS VCS BCS VECS VCS2
214 * --------------------------------------------------------------------
215 * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
216 * |-------------------------------------------------------------------
217 * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
218 * |-------------------------------------------------------------------
219 * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
220 * |-------------------------------------------------------------------
221 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
222 * |-------------------------------------------------------------------
223 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
224 * |-------------------------------------------------------------------
225 *
226 * Generalization:
227 * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
228 * ie. transpose of g(x, y)
229 *
230 * sync from sync from sync from sync from sync from
231 * RCS VCS BCS VECS VCS2
232 * --------------------------------------------------------------------
233 * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
234 * |-------------------------------------------------------------------
235 * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
236 * |-------------------------------------------------------------------
237 * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
238 * |-------------------------------------------------------------------
239 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
240 * |-------------------------------------------------------------------
241 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
242 * |-------------------------------------------------------------------
243 *
244 * Generalization:
245 * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
246 * ie. transpose of f(x, y)
247 */
248 struct {
249 u32 sync_seqno[I915_NUM_RINGS-1];
250
251 union {
252 struct {
253 /* our mbox written by others */
254 u32 wait[I915_NUM_RINGS];
255 /* mboxes this ring signals to */
256 i915_reg_t signal[I915_NUM_RINGS];
257 } mbox;
258 u64 signal_ggtt[I915_NUM_RINGS];
259 };
260
261 /* AKA wait() */
262 int (*sync_to)(struct drm_i915_gem_request *to_req,
263 struct intel_engine_cs *from,
264 u32 seqno);
265 int (*signal)(struct drm_i915_gem_request *signaller_req,
266 /* num_dwords needed by caller */
267 unsigned int num_dwords);
268 } semaphore;
269
270 /* Execlists */
271 spinlock_t execlist_lock;
272 struct list_head execlist_queue;
273 struct list_head execlist_retired_req_list;
274 u8 next_context_status_buffer;
275 bool disable_lite_restore_wa;
276 u32 ctx_desc_template;
277 u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
278 int (*emit_request)(struct drm_i915_gem_request *request);
279 int (*emit_flush)(struct drm_i915_gem_request *request,
280 u32 invalidate_domains,
281 u32 flush_domains);
282 int (*emit_bb_start)(struct drm_i915_gem_request *req,
283 u64 offset, unsigned dispatch_flags);
284
285 /**
286 * List of objects currently involved in rendering from the
287 * ringbuffer.
288 *
289 * Includes buffers having the contents of their GPU caches
290 * flushed, not necessarily primitives. last_read_req
291 * represents when the rendering involved will be completed.
292 *
293 * A reference is held on the buffer while on this list.
294 */
295 struct list_head active_list;
296
297 /**
298 * List of breadcrumbs associated with GPU requests currently
299 * outstanding.
300 */
301 struct list_head request_list;
302
303 /**
304 * Seqno of request most recently submitted to request_list.
305 * Used exclusively by hang checker to avoid grabbing lock while
306 * inspecting request list.
307 */
308 u32 last_submitted_seqno;
309
310 bool gpu_caches_dirty;
311
312 wait_queue_head_t irq_queue;
313
314 struct intel_context *last_context;
315
316 struct intel_ring_hangcheck hangcheck;
317
318 struct {
319 struct drm_i915_gem_object *obj;
320 u32 gtt_offset;
321 volatile u32 *cpu_page;
322 } scratch;
323
324 bool needs_cmd_parser;
325
326 /*
327 * Table of commands the command parser needs to know about
328 * for this ring.
329 */
330 DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
331
332 /*
333 * Table of registers allowed in commands that read/write registers.
334 */
335 const struct drm_i915_reg_descriptor *reg_table;
336 int reg_count;
337
338 /*
339 * Table of registers allowed in commands that read/write registers, but
340 * only from the DRM master.
341 */
342 const struct drm_i915_reg_descriptor *master_reg_table;
343 int master_reg_count;
344
345 /*
346 * Returns the bitmask for the length field of the specified command.
347 * Return 0 for an unrecognized/invalid command.
348 *
349 * If the command parser finds an entry for a command in the ring's
350 * cmd_tables, it gets the command's length based on the table entry.
351 * If not, it calls this function to determine the per-ring length field
352 * encoding for the command (i.e. certain opcode ranges use certain bits
353 * to encode the command length in the header).
354 */
355 u32 (*get_cmd_length_mask)(u32 cmd_header);
356};
357
358static inline bool
359intel_ring_initialized(struct intel_engine_cs *ring)
360{
361 return ring->dev != NULL;
362}
363
364static inline unsigned
365intel_ring_flag(struct intel_engine_cs *ring)
366{
367 return 1 << ring->id;
368}
369
370static inline u32
371intel_ring_sync_index(struct intel_engine_cs *ring,
372 struct intel_engine_cs *other)
373{
374 int idx;
375
376 /*
377 * rcs -> 0 = bcs, 1 = vcs, 2 = vcs2, 3 = vecs;
378 * bcs -> 0 = vcs, 1 = vcs2, 2 = vecs, 3 = rcs;
379 * vcs -> 0 = vcs2, 1 = vecs, 2 = rcs, 3 = bcs;
380 * vcs2 -> 0 = vecs, 1 = rcs, 2 = bcs, 3 = vcs;
381 * vecs -> 0 = rcs, 1 = bcs, 2 = vcs, 3 = vcs2;
382 */
383
384 idx = (other - ring) - 1;
385 if (idx < 0)
386 idx += I915_NUM_RINGS;
387
388 return idx;
389}
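/*
 * Usage note (illustrative, not a new interface): the index computed above
 * is what compacts the per-engine semaphore state such as
 * semaphore.sync_seqno[I915_NUM_RINGS-1]; each engine only needs slots for
 * the other engines, not for itself.
 */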
390
391static inline void
392intel_flush_status_page(struct intel_engine_cs *ring, int reg)
393{
394 drm_clflush_virt_range(&ring->status_page.page_addr[reg],
395 sizeof(uint32_t));
396}
397
398static inline u32
399intel_read_status_page(struct intel_engine_cs *ring,
400 int reg)
401{
402 /* Ensure that the compiler doesn't optimize away the load. */
403 barrier();
404 return ring->status_page.page_addr[reg];
405}
406
407static inline void
408intel_write_status_page(struct intel_engine_cs *ring,
409 int reg, u32 value)
410{
411 ring->status_page.page_addr[reg] = value;
412}
413
414/*
415 * Reads a dword out of the status page, which is written to from the command
416 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
417 * MI_STORE_DATA_IMM.
418 *
419 * The following dwords have a reserved meaning:
420 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
421 * 0x04: ring 0 head pointer
422 * 0x05: ring 1 head pointer (915-class)
423 * 0x06: ring 2 head pointer (915-class)
424 * 0x10-0x1b: Context status DWords (GM45)
425 * 0x1f: Last written status offset. (GM45)
426 * 0x20-0x2f: Reserved (Gen6+)
427 *
428 * The area from dword 0x30 to 0x3ff is available for driver usage.
429 */
430#define I915_GEM_HWS_INDEX 0x30
431#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
432#define I915_GEM_HWS_SCRATCH_INDEX 0x40
433#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
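/*
 * Illustrative sketch only (hypothetical helper): by convention the last
 * completed seqno lives in the I915_GEM_HWS_INDEX dword of the status page,
 * so a lazy (non-kicking) read looks like this.
 */
static inline u32 example_read_hws_seqno(struct intel_engine_cs *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}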
434
435struct intel_ringbuffer *
436intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
437int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
438 struct intel_ringbuffer *ringbuf);
439void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
440void intel_ringbuffer_free(struct intel_ringbuffer *ring);
441
442void intel_stop_ring_buffer(struct intel_engine_cs *ring);
443void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
444
445int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
446
447int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
448int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
449static inline void intel_ring_emit(struct intel_engine_cs *ring,
450 u32 data)
451{
452 struct intel_ringbuffer *ringbuf = ring->buffer;
453 iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
454 ringbuf->tail += 4;
455}
456static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
457 i915_reg_t reg)
458{
459 intel_ring_emit(ring, i915_mmio_reg_offset(reg));
460}
461static inline void intel_ring_advance(struct intel_engine_cs *ring)
462{
463 struct intel_ringbuffer *ringbuf = ring->buffer;
464 ringbuf->tail &= ringbuf->size - 1;
465}
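/*
 * Illustrative usage sketch only (hypothetical helper): the canonical
 * pattern is intel_ring_begin() for a fixed number of dwords, exactly that
 * many intel_ring_emit() calls, then intel_ring_advance(). The engine is
 * normally obtained from the request; it is passed explicitly here only to
 * keep the sketch self-contained.
 */
static inline int example_legacy_emit_two_dwords(struct drm_i915_gem_request *req,
						 struct intel_engine_cs *ring,
						 u32 cmd, u32 data)
{
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, data);
	intel_ring_advance(ring);

	return 0;
}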
466int __intel_ring_space(int head, int tail, int size);
467void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
468int intel_ring_space(struct intel_ringbuffer *ringbuf);
469bool intel_ring_stopped(struct intel_engine_cs *ring);
470
471int __must_check intel_ring_idle(struct intel_engine_cs *ring);
472void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
473int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
474int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
475
476void intel_fini_pipe_control(struct intel_engine_cs *ring);
477int intel_init_pipe_control(struct intel_engine_cs *ring);
478
479int intel_init_render_ring_buffer(struct drm_device *dev);
480int intel_init_bsd_ring_buffer(struct drm_device *dev);
481int intel_init_bsd2_ring_buffer(struct drm_device *dev);
482int intel_init_blt_ring_buffer(struct drm_device *dev);
483int intel_init_vebox_ring_buffer(struct drm_device *dev);
484
485u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
486
487int init_workarounds_ring(struct intel_engine_cs *ring);
488
489static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
490{
491 return ringbuf->tail;
492}
493
494/*
495 * Arbitrary size for largest possible 'add request' sequence. The code paths
496 * are complex and variable. Empirical measurement shows that the worst case
497 * is ILK at 136 words. Reserving too much is better than reserving too little
498 * as that allows for corner cases that might have been missed. So the figure
499 * has been rounded up to 160 words.
500 */
501#define MIN_SPACE_FOR_ADD_REQUEST 160
502
503/*
504 * Reserve space in the ring to guarantee that the i915_add_request() call
505 * will always have sufficient room to do its stuff. The request creation
506 * code calls this automatically.
507 */
508void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
509/* Cancel the reservation, e.g. because the request is being discarded. */
510void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
511/* Use the reserved space - for use by i915_add_request() only. */
512void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
513/* Finish with the reserved space - for use by i915_add_request() only. */
514void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
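/*
 * Illustrative ordering note (derived from the comments above): _reserve()
 * happens at request creation, then either _cancel() if the request is
 * abandoned, or _use() and _end() bracketing the emission performed by
 * i915_add_request().
 */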
515
516/* Legacy ringbuffer specific portion of reservation code: */
517int intel_ring_reserve_space(struct drm_i915_gem_request *request);
518
519#endif /* _INTEL_RINGBUFFER_H_ */
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _INTEL_RINGBUFFER_H_
3#define _INTEL_RINGBUFFER_H_
4
5#include <linux/hashtable.h>
6
7#include "i915_gem_batch_pool.h"
8#include "i915_gem_timeline.h"
9
10#include "i915_pmu.h"
11#include "i915_request.h"
12#include "i915_selftest.h"
13
14struct drm_printer;
15
16#define I915_CMD_HASH_ORDER 9
17
18/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
19 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
20 * to give some indication as to some of the magic values used in the various
21 * workarounds!
22 */
23#define CACHELINE_BYTES 64
24#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
25
26struct intel_hw_status_page {
27 struct i915_vma *vma;
28 u32 *page_addr;
29 u32 ggtt_offset;
30};
31
32#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
33#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
34
35#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
36#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
37
38#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
39#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
40
41#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
42#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
43
44#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
45#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
46
47#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
48#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
49
50/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
51 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
52 */
53enum intel_engine_hangcheck_action {
54 ENGINE_IDLE = 0,
55 ENGINE_WAIT,
56 ENGINE_ACTIVE_SEQNO,
57 ENGINE_ACTIVE_HEAD,
58 ENGINE_ACTIVE_SUBUNITS,
59 ENGINE_WAIT_KICK,
60 ENGINE_DEAD,
61};
62
63static inline const char *
64hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
65{
66 switch (a) {
67 case ENGINE_IDLE:
68 return "idle";
69 case ENGINE_WAIT:
70 return "wait";
71 case ENGINE_ACTIVE_SEQNO:
72 return "active seqno";
73 case ENGINE_ACTIVE_HEAD:
74 return "active head";
75 case ENGINE_ACTIVE_SUBUNITS:
76 return "active subunits";
77 case ENGINE_WAIT_KICK:
78 return "wait kick";
79 case ENGINE_DEAD:
80 return "dead";
81 }
82
83 return "unknown";
84}
85
86#define I915_MAX_SLICES 3
87#define I915_MAX_SUBSLICES 3
88
89#define instdone_slice_mask(dev_priv__) \
90 (INTEL_GEN(dev_priv__) == 7 ? \
91 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
92
93#define instdone_subslice_mask(dev_priv__) \
94 (INTEL_GEN(dev_priv__) == 7 ? \
95 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
96
97#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
98 for ((slice__) = 0, (subslice__) = 0; \
99 (slice__) < I915_MAX_SLICES; \
100 (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
101 (slice__) += ((subslice__) == 0)) \
102 for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
103 (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
104
105struct intel_instdone {
106 u32 instdone;
107 /* The following exist only in the RCS engine */
108 u32 slice_common;
109 u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
110 u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
111};
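/*
 * Illustrative sketch only (hypothetical helper): walking every
 * slice/subslice INSTDONE sample present on this device with the iterator
 * defined above, here simply folding the sampler dwords together.
 */
static inline u32
example_fold_sampler_instdone(struct drm_i915_private *dev_priv,
			      const struct intel_instdone *instdone)
{
	int slice, subslice;
	u32 acc = 0;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		acc |= instdone->sampler[slice][subslice];

	return acc;
}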
112
113struct intel_engine_hangcheck {
114 u64 acthd;
115 u32 seqno;
116 enum intel_engine_hangcheck_action action;
117 unsigned long action_timestamp;
118 int deadlock;
119 struct intel_instdone instdone;
120 struct i915_request *active_request;
121 bool stalled;
122};
123
124struct intel_ring {
125 struct i915_vma *vma;
126 void *vaddr;
127
128 struct list_head request_list;
129
130 u32 head;
131 u32 tail;
132 u32 emit;
133
134 u32 space;
135 u32 size;
136 u32 effective_size;
137};
138
139struct i915_gem_context;
140struct drm_i915_reg_table;
141
142/*
143 * we use a single page to load ctx workarounds so all of these
144 * values are referred to in terms of dwords
145 *
146 * struct i915_wa_ctx_bb:
147 * offset: specifies the batch starting position; also helpful in case
148 * we want to have multiple batches at different offsets based on
149 * some criteria. It is not a requirement at the moment but provides
150 * an option for future use.
151 * size: size of the batch in DWORDS
152 */
153struct i915_ctx_workarounds {
154 struct i915_wa_ctx_bb {
155 u32 offset;
156 u32 size;
157 } indirect_ctx, per_ctx;
158 struct i915_vma *vma;
159};
160
161struct i915_request;
162
163#define I915_MAX_VCS 4
164#define I915_MAX_VECS 2
165
166/*
167 * Engine IDs definitions.
168 * Keep instances of the same type engine together.
169 */
170enum intel_engine_id {
171 RCS = 0,
172 BCS,
173 VCS,
174 VCS2,
175 VCS3,
176 VCS4,
177#define _VCS(n) (VCS + (n))
178 VECS,
179 VECS2
180#define _VECS(n) (VECS + (n))
181};
182
183struct i915_priolist {
184 struct rb_node node;
185 struct list_head requests;
186 int priority;
187};
188
189/**
190 * struct intel_engine_execlists - execlist submission queue and port state
191 *
192 * The struct intel_engine_execlists represents the combined logical state of
193 * driver and the hardware state for execlist mode of submission.
194 */
195struct intel_engine_execlists {
196 /**
197 * @tasklet: softirq tasklet for bottom handler
198 */
199 struct tasklet_struct tasklet;
200
201 /**
202 * @default_priolist: priority list for I915_PRIORITY_NORMAL
203 */
204 struct i915_priolist default_priolist;
205
206 /**
207 * @no_priolist: priority lists disabled
208 */
209 bool no_priolist;
210
211 /**
212 * @submit_reg: gen-specific execlist submission register
213 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
214 * the ExecList Submission Queue Contents register array for Gen11+
215 */
216 u32 __iomem *submit_reg;
217
218 /**
219 * @ctrl_reg: the enhanced execlists control register, used to load the
220 * submit queue on the HW and to request preemptions to idle
221 */
222 u32 __iomem *ctrl_reg;
223
224 /**
225 * @port: execlist port states
226 *
227 * For each hardware ELSP (ExecList Submission Port) we keep
228 * track of the last request and the number of times we submitted
229 * that port to hw. We then count the number of times the hw reports
230 * a context completion or preemption. As only one context can
231 * be active on hw, we limit resubmission of context to port[0]. This
232 * is called a Lite Restore of the context.
233 */
234 struct execlist_port {
235 /**
236 * @request_count: combined request and submission count
237 */
238 struct i915_request *request_count;
239#define EXECLIST_COUNT_BITS 2
240#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
241#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
242#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
243#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
244#define port_set(p, packed) ((p)->request_count = (packed))
245#define port_isset(p) ((p)->request_count)
246#define port_index(p, execlists) ((p) - (execlists)->port)
247
248 /**
249 * @context_id: context ID for port
250 */
251 GEM_DEBUG_DECL(u32 context_id);
252
253#define EXECLIST_MAX_PORTS 2
254 } port[EXECLIST_MAX_PORTS];
255
256 /**
257 * @active: is the HW active? We consider the HW as active after
258 * submitting any context for execution and until we have seen the
259 * last context completion event. After that, we do not expect any
260 * more events until we submit, and so can park the HW.
261 *
262 * As we have a small number of different sources from which we feed
263 * the HW, we track the state of each inside a single bitfield.
264 */
265 unsigned int active;
266#define EXECLISTS_ACTIVE_USER 0
267#define EXECLISTS_ACTIVE_PREEMPT 1
268#define EXECLISTS_ACTIVE_HWACK 2
269
270 /**
271 * @port_mask: number of execlist ports - 1
272 */
273 unsigned int port_mask;
274
275 /**
276 * @queue_priority: Highest pending priority.
277 *
278 * When we add requests into the queue, or adjust the priority of
279 * executing requests, we compute the maximum priority of those
280 * pending requests. We can then use this value to determine if
281 * we need to preempt the executing requests to service the queue.
282 */
283 int queue_priority;
284
285 /**
286 * @queue: queue of requests, in priority lists
287 */
288 struct rb_root queue;
289
290 /**
291 * @first: leftmost level in priority @queue
292 */
293 struct rb_node *first;
294
295 /**
296 * @fw_domains: forcewake domains for irq tasklet
297 */
298 unsigned int fw_domains;
299
300 /**
301 * @csb_head: context status buffer head
302 */
303 unsigned int csb_head;
304
305 /**
306 * @csb_use_mmio: access csb through mmio, instead of hwsp
307 */
308 bool csb_use_mmio;
309
310 /**
311 * @preempt_complete_status: expected CSB upon completing preemption
312 */
313 u32 preempt_complete_status;
314};
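/*
 * Illustrative sketch only (hypothetical helper): each execlist_port packs
 * the request pointer and its submission count into one pointer-sized field,
 * which the port_*() macros above pack and unpack.
 */
static inline struct i915_request *
example_port_peek(const struct intel_engine_execlists *execlists,
		  unsigned int *count)
{
	const struct execlist_port *port = &execlists->port[0];

	*count = port_count(port);	/* low EXECLIST_COUNT_BITS bits */
	return port_request(port);	/* pointer with the count bits cleared */
}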
315
316#define INTEL_ENGINE_CS_MAX_NAME 8
317
318struct intel_engine_cs {
319 struct drm_i915_private *i915;
320 char name[INTEL_ENGINE_CS_MAX_NAME];
321
322 enum intel_engine_id id;
323 unsigned int hw_id;
324 unsigned int guc_id;
325
326 u8 uabi_id;
327 u8 uabi_class;
328
329 u8 class;
330 u8 instance;
331 u32 context_size;
332 u32 mmio_base;
333 unsigned int irq_shift;
334
335 struct intel_ring *buffer;
336 struct intel_timeline *timeline;
337
338 struct drm_i915_gem_object *default_state;
339
340 atomic_t irq_count;
341 unsigned long irq_posted;
342#define ENGINE_IRQ_BREADCRUMB 0
343#define ENGINE_IRQ_EXECLIST 1
344
345 /* Rather than have every client wait upon all user interrupts,
346 * with the herd waking after every interrupt and each doing the
347 * heavyweight seqno dance, we delegate the task (of being the
348 * bottom-half of the user interrupt) to the first client. After
349 * every interrupt, we wake up one client, who does the heavyweight
350 * coherent seqno read and either goes back to sleep (if incomplete),
351 * or wakes up all the completed clients in parallel, before then
352 * transferring the bottom-half status to the next client in the queue.
353 *
354 * Compared to walking the entire list of waiters in a single dedicated
355 * bottom-half, we reduce the latency of the first waiter by avoiding
356 * a context switch, but incur additional coherent seqno reads when
357 * following the chain of request breadcrumbs. Since it is most likely
358 * that we have a single client waiting on each seqno, reducing
359 * the overhead of waking that client is much preferred.
360 */
361 struct intel_breadcrumbs {
362 spinlock_t irq_lock; /* protects irq_*; irqsafe */
363 struct intel_wait *irq_wait; /* oldest waiter by retirement */
364
365 spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
366 struct rb_root waiters; /* sorted by retirement, priority */
367 struct list_head signals; /* sorted by retirement */
368 struct task_struct *signaler; /* used for fence signalling */
369
370 struct timer_list fake_irq; /* used after a missed interrupt */
371 struct timer_list hangcheck; /* detect missed interrupts */
372
373 unsigned int hangcheck_interrupts;
374 unsigned int irq_enabled;
375
376 bool irq_armed : 1;
377 I915_SELFTEST_DECLARE(bool mock : 1);
378 } breadcrumbs;
379
380 struct {
381 /**
382 * @enable: Bitmask of enable sample events on this engine.
383 *
384 * Bits correspond to sample event types, for instance
385 * I915_SAMPLE_QUEUED is bit 0 etc.
386 */
387 u32 enable;
388 /**
389 * @enable_count: Reference count for the enabled samplers.
390 *
391 * Index number corresponds to the bit number from @enable.
392 */
393 unsigned int enable_count[I915_PMU_SAMPLE_BITS];
394 /**
395 * @sample: Counter values for sampling events.
396 *
397 * Our internal timer stores the current counters in this field.
398 */
399#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
400 struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
401 } pmu;
402
403 /*
404 * A pool of objects to use as shadow copies of client batch buffers
405 * when the command parser is enabled. Prevents the client from
406 * modifying the batch contents after software parsing.
407 */
408 struct i915_gem_batch_pool batch_pool;
409
410 struct intel_hw_status_page status_page;
411 struct i915_ctx_workarounds wa_ctx;
412 struct i915_vma *scratch;
413
414 u32 irq_keep_mask; /* always keep these interrupts */
415 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
416 void (*irq_enable)(struct intel_engine_cs *engine);
417 void (*irq_disable)(struct intel_engine_cs *engine);
418
419 int (*init_hw)(struct intel_engine_cs *engine);
420 void (*reset_hw)(struct intel_engine_cs *engine,
421 struct i915_request *rq);
422
423 void (*park)(struct intel_engine_cs *engine);
424 void (*unpark)(struct intel_engine_cs *engine);
425
426 void (*set_default_submission)(struct intel_engine_cs *engine);
427
428 struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
429 struct i915_gem_context *ctx);
430 void (*context_unpin)(struct intel_engine_cs *engine,
431 struct i915_gem_context *ctx);
432 int (*request_alloc)(struct i915_request *rq);
433 int (*init_context)(struct i915_request *rq);
434
435 int (*emit_flush)(struct i915_request *request, u32 mode);
436#define EMIT_INVALIDATE BIT(0)
437#define EMIT_FLUSH BIT(1)
438#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
439 int (*emit_bb_start)(struct i915_request *rq,
440 u64 offset, u32 length,
441 unsigned int dispatch_flags);
442#define I915_DISPATCH_SECURE BIT(0)
443#define I915_DISPATCH_PINNED BIT(1)
444#define I915_DISPATCH_RS BIT(2)
445 void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
446 int emit_breadcrumb_sz;
447
448 /* Pass the request to the hardware queue (e.g. directly into
449 * the legacy ringbuffer or to the end of an execlist).
450 *
451 * This is called from an atomic context with irqs disabled; must
452 * be irq safe.
453 */
454 void (*submit_request)(struct i915_request *rq);
455
456 /* Call when the priority on a request has changed and it and its
457 * dependencies may need rescheduling. Note the request itself may
458 * not be ready to run!
459 *
460 * Called under the struct_mutex.
461 */
462 void (*schedule)(struct i915_request *request, int priority);
463
464 /*
465 * Cancel all requests on the hardware, or queued for execution.
466 * This should only cancel the ready requests that have been
467 * submitted to the engine (via the engine->submit_request callback).
468 * This is called when marking the device as wedged.
469 */
470 void (*cancel_requests)(struct intel_engine_cs *engine);
471
472 /* Some chipsets are not quite as coherent as advertised and need
473 * an expensive kick to force a true read of the up-to-date seqno.
474 * However, the up-to-date seqno is not always required and the last
475 * seen value is good enough. Note that the seqno will always be
476 * monotonic, even if not coherent.
477 */
478 void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
479 void (*cleanup)(struct intel_engine_cs *engine);
480
481 /* GEN8 signal/wait table - never trust comments!
482 * signal to signal to signal to signal to signal to
483 * RCS VCS BCS VECS VCS2
484 * --------------------------------------------------------------------
485 * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
486 * |-------------------------------------------------------------------
487 * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
488 * |-------------------------------------------------------------------
489 * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
490 * |-------------------------------------------------------------------
491 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
492 * |-------------------------------------------------------------------
493 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
494 * |-------------------------------------------------------------------
495 *
496 * Generalization:
497 * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
498 * ie. transpose of g(x, y)
499 *
500 * sync from sync from sync from sync from sync from
501 * RCS VCS BCS VECS VCS2
502 * --------------------------------------------------------------------
503 * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
504 * |-------------------------------------------------------------------
505 * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
506 * |-------------------------------------------------------------------
507 * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
508 * |-------------------------------------------------------------------
509 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
510 * |-------------------------------------------------------------------
511 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
512 * |-------------------------------------------------------------------
513 *
514 * Generalization:
515 * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
516 * ie. transpose of f(x, y)
517 */
518 struct {
519#define GEN6_SEMAPHORE_LAST VECS_HW
520#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
521#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
522 struct {
523 /* our mbox written by others */
524 u32 wait[GEN6_NUM_SEMAPHORES];
525 /* mboxes this ring signals to */
526 i915_reg_t signal[GEN6_NUM_SEMAPHORES];
527 } mbox;
528
529 /* AKA wait() */
530 int (*sync_to)(struct i915_request *rq,
531 struct i915_request *signal);
532 u32 *(*signal)(struct i915_request *rq, u32 *cs);
533 } semaphore;
534
535 struct intel_engine_execlists execlists;
536
537 /* Contexts are pinned whilst they are active on the GPU. The last
538 * context executed remains active whilst the GPU is idle - the
539 * switch away and write to the context object only occurs on the
540 * next execution. Contexts are only unpinned on retirement of the
541 * following request ensuring that we can always write to the object
542 * on the context switch even after idling. Across suspend, we switch
543 * to the kernel context and trash it as the save may not happen
544 * before the hardware is powered down.
545 */
546 struct i915_gem_context *last_retired_context;
547
548 /* We track the current MI_SET_CONTEXT in order to eliminate
549 * redundant context switches. This presumes that requests are not
550 * reordered! Or when they are the tracking is updated along with
551 * the emission of individual requests into the legacy command
552 * stream (ring).
553 */
554 struct i915_gem_context *legacy_active_context;
555 struct i915_hw_ppgtt *legacy_active_ppgtt;
556
557 /* status_notifier: list of callbacks for context-switch changes */
558 struct atomic_notifier_head context_status_notifier;
559
560 struct intel_engine_hangcheck hangcheck;
561
562#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
563#define I915_ENGINE_SUPPORTS_STATS BIT(1)
564 unsigned int flags;
565
566 /*
567 * Table of commands the command parser needs to know about
568 * for this engine.
569 */
570 DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
571
572 /*
573 * Table of registers allowed in commands that read/write registers.
574 */
575 const struct drm_i915_reg_table *reg_tables;
576 int reg_table_count;
577
578 /*
579 * Returns the bitmask for the length field of the specified command.
580 * Return 0 for an unrecognized/invalid command.
581 *
582 * If the command parser finds an entry for a command in the engine's
583 * cmd_tables, it gets the command's length based on the table entry.
584 * If not, it calls this function to determine the per-engine length
585 * field encoding for the command (i.e. different opcode ranges use
586 * certain bits to encode the command length in the header).
587 */
588 u32 (*get_cmd_length_mask)(u32 cmd_header);
589
590 struct {
591 /**
592 * @lock: Lock protecting the below fields.
593 */
594 spinlock_t lock;
595 /**
596 * @enabled: Reference count indicating number of listeners.
597 */
598 unsigned int enabled;
599 /**
600 * @active: Number of contexts currently scheduled in.
601 */
602 unsigned int active;
603 /**
604 * @enabled_at: Timestamp when busy stats were enabled.
605 */
606 ktime_t enabled_at;
607 /**
608 * @start: Timestamp of the last idle to active transition.
609 *
610 * Idle is defined as active == 0, and active as active > 0.
611 */
612 ktime_t start;
613 /**
614 * @total: Total time this engine was busy.
615 *
616 * Accumulated time not counting the most recent block in cases
617 * where engine is currently busy (active > 0).
618 */
619 ktime_t total;
620 } stats;
621};
622
623static inline bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
624{
625 return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
626}
627
628static inline bool intel_engine_supports_stats(struct intel_engine_cs *engine)
629{
630 return engine->flags & I915_ENGINE_SUPPORTS_STATS;
631}
632
633static inline void
634execlists_set_active(struct intel_engine_execlists *execlists,
635 unsigned int bit)
636{
637 __set_bit(bit, (unsigned long *)&execlists->active);
638}
639
640static inline void
641execlists_clear_active(struct intel_engine_execlists *execlists,
642 unsigned int bit)
643{
644 __clear_bit(bit, (unsigned long *)&execlists->active);
645}
646
647static inline bool
648execlists_is_active(const struct intel_engine_execlists *execlists,
649 unsigned int bit)
650{
651 return test_bit(bit, (unsigned long *)&execlists->active);
652}
653
654void
655execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
656
657void
658execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
659
660static inline unsigned int
661execlists_num_ports(const struct intel_engine_execlists * const execlists)
662{
663 return execlists->port_mask + 1;
664}
665
666static inline void
667execlists_port_complete(struct intel_engine_execlists * const execlists,
668 struct execlist_port * const port)
669{
670 const unsigned int m = execlists->port_mask;
671
672 GEM_BUG_ON(port_index(port, execlists) != 0);
673 GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
674
675 memmove(port, port + 1, m * sizeof(struct execlist_port));
676 memset(port + m, 0, sizeof(struct execlist_port));
677}
678
679static inline unsigned int
680intel_engine_flag(const struct intel_engine_cs *engine)
681{
682 return BIT(engine->id);
683}
684
685static inline u32
686intel_read_status_page(const struct intel_engine_cs *engine, int reg)
687{
688 /* Ensure that the compiler doesn't optimize away the load. */
689 return READ_ONCE(engine->status_page.page_addr[reg]);
690}
691
692static inline void
693intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
694{
695 /* Writing into the status page should be done sparingly. Since
696 * we do so when we are uncertain of the device state, we take a bit
697 * of extra paranoia to try and ensure that the HWS takes the value
698 * we give and that it doesn't end up trapped inside the CPU!
699 */
700 if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
701 mb();
702 clflush(&engine->status_page.page_addr[reg]);
703 engine->status_page.page_addr[reg] = value;
704 clflush(&engine->status_page.page_addr[reg]);
705 mb();
706 } else {
707 WRITE_ONCE(engine->status_page.page_addr[reg], value);
708 }
709}
710
711/*
712 * Reads a dword out of the status page, which is written to from the command
713 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
714 * MI_STORE_DATA_IMM.
715 *
716 * The following dwords have a reserved meaning:
717 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
718 * 0x04: ring 0 head pointer
719 * 0x05: ring 1 head pointer (915-class)
720 * 0x06: ring 2 head pointer (915-class)
721 * 0x10-0x1b: Context status DWords (GM45)
722 * 0x1f: Last written status offset. (GM45)
723 * 0x20-0x2f: Reserved (Gen6+)
724 *
725 * The area from dword 0x30 to 0x3ff is available for driver usage.
726 */
727#define I915_GEM_HWS_INDEX 0x30
728#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
729#define I915_GEM_HWS_PREEMPT_INDEX 0x32
730#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
731#define I915_GEM_HWS_SCRATCH_INDEX 0x40
732#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
733
734#define I915_HWS_CSB_BUF0_INDEX 0x10
735#define I915_HWS_CSB_WRITE_INDEX 0x1f
736#define CNL_HWS_CSB_WRITE_INDEX 0x2f
737
738struct intel_ring *
739intel_engine_create_ring(struct intel_engine_cs *engine, int size);
740int intel_ring_pin(struct intel_ring *ring,
741 struct drm_i915_private *i915,
742 unsigned int offset_bias);
743void intel_ring_reset(struct intel_ring *ring, u32 tail);
744unsigned int intel_ring_update_space(struct intel_ring *ring);
745void intel_ring_unpin(struct intel_ring *ring);
746void intel_ring_free(struct intel_ring *ring);
747
748void intel_engine_stop(struct intel_engine_cs *engine);
749void intel_engine_cleanup(struct intel_engine_cs *engine);
750
751void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
752
753int __must_check intel_ring_cacheline_align(struct i915_request *rq);
754
755int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
756u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
757
758static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
759{
760 /* Dummy function.
761 *
762 * This serves as a placeholder in the code so that the reader
763 * can compare against the preceding intel_ring_begin() and
764 * check that the number of dwords emitted matches the space
765 * reserved for the command packet (i.e. the value passed to
766 * intel_ring_begin()).
767 */
768 GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
769}
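/*
 * Illustrative usage sketch only (hypothetical helper): with the u32 *cs
 * interface, callers write dwords directly into the space returned by
 * intel_ring_begin() and then pass the final pointer to intel_ring_advance()
 * so the dword count can be cross-checked.
 */
static inline int example_emit_two_dwords(struct i915_request *rq,
					  u32 cmd, u32 data)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = data;
	intel_ring_advance(rq, cs);

	return 0;
}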
770
771static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
772{
773 return pos & (ring->size - 1);
774}
775
776static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
777{
778 /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
779 u32 offset = addr - rq->ring->vaddr;
780 GEM_BUG_ON(offset > rq->ring->size);
781 return intel_ring_wrap(rq->ring, offset);
782}
783
784static inline void
785assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
786{
787 /* We could combine these into a single tail operation, but keeping
788 * them as separate tests will help identify the cause should one
789 * ever fire.
790 */
791 GEM_BUG_ON(!IS_ALIGNED(tail, 8));
792 GEM_BUG_ON(tail >= ring->size);
793
794 /*
795 * "Ring Buffer Use"
796 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6
797 * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
798 * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
799 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
800 * same cacheline, the Head Pointer must not be greater than the Tail
801 * Pointer."
802 *
803 * We use ring->head as the last known location of the actual RING_HEAD,
804 * it may have advanced but in the worst case it is equally the same
805 * as ring->head and so we should never program RING_TAIL to advance
806 * into the same cacheline as ring->head.
807 */
808#define cacheline(a) round_down(a, CACHELINE_BYTES)
809 GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
810 tail < ring->head);
811#undef cacheline
812}
813
814static inline unsigned int
815intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
816{
817 /* Whilst writes to the tail are strictly ordered, there is no
818 * serialisation between readers and the writers. The tail may be
819 * read by i915_request_retire() just as it is being updated
820 * by execlists, as although the breadcrumb is complete, the context
821 * switch hasn't been seen.
822 */
823 assert_ring_tail_valid(ring, tail);
824 ring->tail = tail;
825 return tail;
826}
827
828void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
829
830void intel_engine_setup_common(struct intel_engine_cs *engine);
831int intel_engine_init_common(struct intel_engine_cs *engine);
832int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
833void intel_engine_cleanup_common(struct intel_engine_cs *engine);
834
835int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
836int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
837int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
838int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
839
840u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
841u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
842
843static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
844{
845 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
846}
847
848static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
849{
850 /* We are only peeking at the tail of the submit queue (and not the
851 * queue itself) in order to gain a hint as to the current active
852 * state of the engine. Callers are not expected to be taking
853 * engine->timeline->lock, nor are they expected to be concerned
854 * with serialising this hint with anything, so document it as
855 * a hint and nothing more.
856 */
857 return READ_ONCE(engine->timeline->seqno);
858}
859
860int init_workarounds_ring(struct intel_engine_cs *engine);
861int intel_ring_workarounds_emit(struct i915_request *rq);
862
863void intel_engine_get_instdone(struct intel_engine_cs *engine,
864 struct intel_instdone *instdone);
865
866/*
867 * Arbitrary size for largest possible 'add request' sequence. The code paths
868 * are complex and variable. Empirical measurement shows that the worst case
869 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
870 * we need to allocate double the largest single packet within that emission
871 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
872 */
873#define MIN_SPACE_FOR_ADD_REQUEST 336
874
875static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
876{
877 return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
878}
879
880static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
881{
882 return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
883}
884
885/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
886int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
887
888static inline void intel_wait_init(struct intel_wait *wait,
889 struct i915_request *rq)
890{
891 wait->tsk = current;
892 wait->request = rq;
893}
894
895static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
896{
897 wait->tsk = current;
898 wait->seqno = seqno;
899}
900
901static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
902{
903 return wait->seqno;
904}
905
906static inline bool
907intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
908{
909 wait->seqno = seqno;
910 return intel_wait_has_seqno(wait);
911}
912
913static inline bool
914intel_wait_update_request(struct intel_wait *wait,
915 const struct i915_request *rq)
916{
917 return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
918}
919
920static inline bool
921intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
922{
923 return wait->seqno == seqno;
924}
925
926static inline bool
927intel_wait_check_request(const struct intel_wait *wait,
928 const struct i915_request *rq)
929{
930 return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
931}
932
933static inline bool intel_wait_complete(const struct intel_wait *wait)
934{
935 return RB_EMPTY_NODE(&wait->node);
936}
937
938bool intel_engine_add_wait(struct intel_engine_cs *engine,
939 struct intel_wait *wait);
940void intel_engine_remove_wait(struct intel_engine_cs *engine,
941 struct intel_wait *wait);
942void intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
943void intel_engine_cancel_signaling(struct i915_request *request);
944
945static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
946{
947 return READ_ONCE(engine->breadcrumbs.irq_wait);
948}
949
950unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
951#define ENGINE_WAKEUP_WAITER BIT(0)
952#define ENGINE_WAKEUP_ASLEEP BIT(1)
953
954void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
955void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
956
957void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
958void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
959
960void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
961void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
962
963static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
964{
965 memset(batch, 0, 6 * sizeof(u32));
966
967 batch[0] = GFX_OP_PIPE_CONTROL(6);
968 batch[1] = flags;
969 batch[2] = offset;
970
971 return batch + 6;
972}
973
974static inline u32 *
975gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
976{
977 /* We're using qword write, offset should be aligned to 8 bytes. */
978 GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
979
980 /* w/a for post sync ops following a GPGPU operation we
981 * need a prior CS_STALL, which is emitted by the flush
982 * following the batch.
983 */
984 *cs++ = GFX_OP_PIPE_CONTROL(6);
985 *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
986 PIPE_CONTROL_QW_WRITE;
987 *cs++ = gtt_offset;
988 *cs++ = 0;
989 *cs++ = value;
990 /* We're thrashing one dword of HWS. */
991 *cs++ = 0;
992
993 return cs;
994}
995
996static inline u32 *
997gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
998{
999 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1000 GEM_BUG_ON(gtt_offset & (1 << 5));
1001 /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
1002 GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
1003
1004 *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
1005 *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
1006 *cs++ = 0;
1007 *cs++ = value;
1008
1009 return cs;
1010}
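/*
 * Illustrative sketch only (hypothetical helper): pairing the helper above
 * with intel_hws_seqno_address() to emit a seqno write into the per-engine
 * HWS slot; this mirrors the breadcrumb pattern but is not the driver's
 * actual emission code.
 */
static inline u32 *
example_emit_hws_seqno(struct intel_engine_cs *engine, u32 *cs, u32 seqno)
{
	return gen8_emit_ggtt_write(cs, seqno, intel_hws_seqno_address(engine));
}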
1011
1012bool intel_engine_is_idle(struct intel_engine_cs *engine);
1013bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
1014
1015bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
1016
1017void intel_engines_park(struct drm_i915_private *i915);
1018void intel_engines_unpark(struct drm_i915_private *i915);
1019
1020void intel_engines_reset_default_submission(struct drm_i915_private *i915);
1021unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
1022
1023bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
1024
1025__printf(3, 4)
1026void intel_engine_dump(struct intel_engine_cs *engine,
1027 struct drm_printer *m,
1028 const char *header, ...);
1029
1030struct intel_engine_cs *
1031intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
1032
1033static inline void intel_engine_context_in(struct intel_engine_cs *engine)
1034{
1035 unsigned long flags;
1036
1037 if (READ_ONCE(engine->stats.enabled) == 0)
1038 return;
1039
1040 spin_lock_irqsave(&engine->stats.lock, flags);
1041
1042 if (engine->stats.enabled > 0) {
1043 if (engine->stats.active++ == 0)
1044 engine->stats.start = ktime_get();
1045 GEM_BUG_ON(engine->stats.active == 0);
1046 }
1047
1048 spin_unlock_irqrestore(&engine->stats.lock, flags);
1049}
1050
1051static inline void intel_engine_context_out(struct intel_engine_cs *engine)
1052{
1053 unsigned long flags;
1054
1055 if (READ_ONCE(engine->stats.enabled) == 0)
1056 return;
1057
1058 spin_lock_irqsave(&engine->stats.lock, flags);
1059
1060 if (engine->stats.enabled > 0) {
1061 ktime_t last;
1062
1063 if (engine->stats.active && --engine->stats.active == 0) {
1064 /*
1065 * Decrement the active context count and in case GPU
1066 * is now idle add up to the running total.
1067 */
1068 last = ktime_sub(ktime_get(), engine->stats.start);
1069
1070 engine->stats.total = ktime_add(engine->stats.total,
1071 last);
1072 } else if (engine->stats.active == 0) {
1073 /*
1074 * After turning on engine stats, context out might be
1075 * the first event in which case we account from the
1076 * time stats gathering was turned on.
1077 */
1078 last = ktime_sub(ktime_get(), engine->stats.enabled_at);
1079
1080 engine->stats.total = ktime_add(engine->stats.total,
1081 last);
1082 }
1083 }
1084
1085 spin_unlock_irqrestore(&engine->stats.lock, flags);
1086}
1087
1088int intel_enable_engine_stats(struct intel_engine_cs *engine);
1089void intel_disable_engine_stats(struct intel_engine_cs *engine);
1090
1091ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
1092
1093#endif /* _INTEL_RINGBUFFER_H_ */