v3.1
  1#ifndef _INTEL_RINGBUFFER_H_
  2#define _INTEL_RINGBUFFER_H_
  3
  4enum {
  5    RCS = 0x0,
  6    VCS,
  7    BCS,
  8    I915_NUM_RINGS,
  9};
 10
 11struct  intel_hw_status_page {
 12	u32	__iomem	*page_addr;
 13	unsigned int	gfx_addr;
 14	struct		drm_i915_gem_object *obj;
 15};
 16
 17#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
 18#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
 19
 20#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
 21#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
 22
 23#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
 24#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
 25
 26#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
 27#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
 28
 29#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
 30#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 31
 32#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
 33#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
 34#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
 35
 36struct  intel_ring_buffer {
 37	const char	*name;
 38	enum intel_ring_id {
 39		RING_RENDER = 0x1,
 40		RING_BSD = 0x2,
 41		RING_BLT = 0x4,
 42	} id;
 43	u32		mmio_base;
 44	void		__iomem *virtual_start;
 45	struct		drm_device *dev;
 46	struct		drm_i915_gem_object *obj;
 47
 48	u32		head;
 49	u32		tail;
 50	int		space;
 51	int		size;
 52	int		effective_size;
 53	struct intel_hw_status_page status_page;
 54
 55	spinlock_t	irq_lock;
 56	u32		irq_refcount;
 57	u32		irq_mask;
 58	u32		irq_seqno;		/* last seqno seen at irq time */
 59	u32		trace_irq_seqno;
 60	u32		waiting_seqno;
 61	u32		sync_seqno[I915_NUM_RINGS-1];
 62	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
 63	void		(*irq_put)(struct intel_ring_buffer *ring);
 64
 65	int		(*init)(struct intel_ring_buffer *ring);
 66
 67	void		(*write_tail)(struct intel_ring_buffer *ring,
 68				      u32 value);
 69	int __must_check (*flush)(struct intel_ring_buffer *ring,
 70				  u32	invalidate_domains,
 71				  u32	flush_domains);
 72	int		(*add_request)(struct intel_ring_buffer *ring,
 73				       u32 *seqno);
 74	u32		(*get_seqno)(struct intel_ring_buffer *ring);
 75	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
 76					       u32 offset, u32 length);
 77	void		(*cleanup)(struct intel_ring_buffer *ring);
 78
 79	/**
 80	 * List of objects currently involved in rendering from the
 81	 * ringbuffer.
 82	 *
 83	 * Includes buffers having the contents of their GPU caches
 84	 * flushed, not necessarily primitives.  last_rendering_seqno
 85	 * represents when the rendering involved will be completed.
 86	 *
 87	 * A reference is held on the buffer while on this list.
 88	 */
 89	struct list_head active_list;
 90
 91	/**
 92	 * List of breadcrumbs associated with GPU requests currently
 93	 * outstanding.
 94	 */
 95	struct list_head request_list;
 96
 97	/**
 98	 * List of objects currently pending a GPU write flush.
 99	 *
100	 * All elements on this list will belong to either the
101	 * active_list or flushing_list; last_rendering_seqno can
102	 * be used to differentiate between the two elements.
103	 */
104	struct list_head gpu_write_list;
105
106	/**
107	 * Do we have some not yet emitted requests outstanding?
108	 */
109	u32 outstanding_lazy_request;
110
111	wait_queue_head_t irq_queue;
112	drm_local_map_t map;
113
114	void *private;
115};
116
117static inline u32
118intel_ring_sync_index(struct intel_ring_buffer *ring,
119		      struct intel_ring_buffer *other)
120{
121	int idx;
122
123	/*
124	 * cs -> 0 = vcs, 1 = bcs
125	 * vcs -> 0 = bcs, 1 = cs,
126	 * bcs -> 0 = cs, 1 = vcs.
127	 */
128
129	idx = (other - ring) - 1;
130	if (idx < 0)
131		idx += I915_NUM_RINGS;
132
133	return idx;
134}
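
/*
 * Illustration only: a minimal, self-contained model of the index arithmetic
 * in intel_ring_sync_index() above, using plain array indices in place of the
 * (other - ring) pointer difference (the rings are assumed to live in one
 * contiguous array, as in dev_priv->ring[], so the two are equivalent).
 * Compiles on its own; the output reproduces the cs/vcs/bcs mapping from the
 * comment.
 */
#include <stdio.h>

#define NUM_RINGS 3	/* RCS, VCS, BCS */

static int sync_index(int ring, int other)
{
	int idx = (other - ring) - 1;

	if (idx < 0)
		idx += NUM_RINGS;
	return idx;
}

int main(void)
{
	const char *name[NUM_RINGS] = { "cs", "vcs", "bcs" };
	int from, to;

	for (from = 0; from < NUM_RINGS; from++)
		for (to = 0; to < NUM_RINGS; to++)
			if (from != to)
				printf("%s -> %d = %s\n",
				       name[from], sync_index(from, to), name[to]);
	return 0;
}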
135
136static inline u32
137intel_read_status_page(struct intel_ring_buffer *ring,
138		       int reg)
139{
140	return ioread32(ring->status_page.page_addr + reg);
141}
142
143/**
144 * Reads a dword out of the status page, which is written to from the command
145 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
146 * MI_STORE_DATA_IMM.
147 *
148 * The following dwords have a reserved meaning:
149 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
150 * 0x04: ring 0 head pointer
151 * 0x05: ring 1 head pointer (915-class)
152 * 0x06: ring 2 head pointer (915-class)
153 * 0x10-0x1b: Context status DWords (GM45)
154 * 0x1f: Last written status offset. (GM45)
155 *
156 * The area from dword 0x20 to 0x3ff is available for driver usage.
157 */
158#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
159#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
160#define I915_GEM_HWS_INDEX		0x20
161#define I915_BREADCRUMB_INDEX		0x21
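
/*
 * Illustration only: a host-side stand-in for the status-page access pattern
 * described above. A plain u32 array replaces the GPU-written page and an
 * ordinary load replaces ioread32(); the index is the breadcrumb dword
 * offset (0x21) defined above. Not driver code, just a sketch of the layout.
 */
#include <stdio.h>
#include <stdint.h>

#define BREADCRUMB_INDEX 0x21

static uint32_t read_status_page(const uint32_t *page_addr, int reg)
{
	return page_addr[reg];	/* ioread32() in the real driver */
}

int main(void)
{
	uint32_t status_page[0x400] = { 0 };	/* one 4 KiB page of dwords */

	/* Pretend the ring's MI_STORE_DWORD_INDEX just wrote seqno 42 here. */
	status_page[BREADCRUMB_INDEX] = 42;

	printf("breadcrumb seqno: %u\n",
	       read_status_page(status_page, BREADCRUMB_INDEX));
	return 0;
}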
162
163void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
164
165int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
166static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
167{
168	return intel_wait_ring_buffer(ring, ring->size - 8);
169}
170
171int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
172
173static inline void intel_ring_emit(struct intel_ring_buffer *ring,
174				   u32 data)
175{
176	iowrite32(data, ring->virtual_start + ring->tail);
177	ring->tail += 4;
178}
179
180void intel_ring_advance(struct intel_ring_buffer *ring);
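
/*
 * Illustration only: intel_ring_begin(), intel_ring_emit() and
 * intel_ring_advance() above are used as a reserve/emit/commit sequence:
 * begin() guarantees space for n dwords, each emit() writes one dword at the
 * current tail, and advance() publishes the new tail to the hardware. The
 * self-contained sketch below models that bookkeeping on a plain buffer (no
 * MMIO, no waiting for the GPU to drain the ring) just to show how the tail
 * and the reservation move.
 */
#include <stdio.h>
#include <stdint.h>
#include <assert.h>

struct model_ring {
	uint32_t buf[1024];
	uint32_t tail;		/* byte offset, like ring->tail */
	int	 space;		/* bytes still reserved for this packet */
};

static int model_begin(struct model_ring *ring, int n_dwords)
{
	/* The real driver may have to wait for the ring to drain here. */
	ring->space = n_dwords * 4;
	return 0;
}

static void model_emit(struct model_ring *ring, uint32_t data)
{
	assert(ring->space >= 4);
	ring->buf[ring->tail / 4] = data;
	ring->tail += 4;
	ring->space -= 4;
}

static void model_advance(struct model_ring *ring)
{
	/* The real driver writes ring->tail to the RING_TAIL register here. */
	printf("tail now at byte %u\n", ring->tail);
}

int main(void)
{
	struct model_ring ring = { .tail = 0 };

	if (model_begin(&ring, 2) == 0) {
		model_emit(&ring, 0);	/* e.g. MI_NOOP */
		model_emit(&ring, 0);
		model_advance(&ring);
	}
	return 0;
}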
181
182u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
183int intel_ring_sync(struct intel_ring_buffer *ring,
184		    struct intel_ring_buffer *to,
185		    u32 seqno);
186
187int intel_init_render_ring_buffer(struct drm_device *dev);
188int intel_init_bsd_ring_buffer(struct drm_device *dev);
189int intel_init_blt_ring_buffer(struct drm_device *dev);
190
191u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
192void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
193
194static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
195{
196	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
197		ring->trace_irq_seqno = seqno;
198}
199
200/* DRI warts */
201int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
202
203#endif /* _INTEL_RINGBUFFER_H_ */
v4.10.11
  1#ifndef _INTEL_RINGBUFFER_H_
  2#define _INTEL_RINGBUFFER_H_
  3
  4#include <linux/hashtable.h>
  5#include "i915_gem_batch_pool.h"
  6#include "i915_gem_request.h"
  7#include "i915_gem_timeline.h"
  8
  9#define I915_CMD_HASH_ORDER 9
 10
 11/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 12 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 13 * to give some inclination as to some of the magic values used in the various
 14 * workarounds!
 15 */
 16#define CACHELINE_BYTES 64
 17#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
 18
 19/*
 20 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 21 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 22 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 23 *
 24 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 25 * cacheline, the Head Pointer must not be greater than the Tail
 26 * Pointer."
 27 */
 28#define I915_RING_FREE_SPACE 64
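
/*
 * Illustration only: the cacheline rule above is why the driver always leaves
 * some slack between tail and head instead of filling the ring completely.
 * Below is one plausible space calculation consistent with that rule (the
 * driver's real __intel_ring_space(), declared later in this header, may
 * differ in detail): treat the ring as circular, then discount
 * I915_RING_FREE_SPACE bytes so the tail can never catch up with the head.
 */
#include <stdio.h>

#define RING_FREE_SPACE 64	/* mirrors I915_RING_FREE_SPACE */

static int ring_space(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;			/* tail has wrapped past head */
	return space - RING_FREE_SPACE;		/* keep head and tail apart */
}

int main(void)
{
	/* 4 KiB ring, GPU consumed up to byte 256, CPU has written to 4000. */
	printf("usable bytes: %d\n", ring_space(256, 4000, 4096));
	return 0;
}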
 29
 30struct intel_hw_status_page {
 31	struct i915_vma *vma;
 32	u32 *page_addr;
 33	u32 ggtt_offset;
 34};
 35
 36#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
 37#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
 38
 39#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
 40#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
 41
 42#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
 43#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
 44
 45#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
 46#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
 47
 48#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
 49#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
 50
 51#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
 52#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
 53
 54/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 55 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 56 */
 57#define gen8_semaphore_seqno_size sizeof(uint64_t)
 58#define GEN8_SEMAPHORE_OFFSET(__from, __to)			     \
 59	(((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
 60#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
 61	(dev_priv->semaphore->node.start + \
 62	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
 63#define GEN8_WAIT_OFFSET(__ring, from)			     \
 64	(dev_priv->semaphore->node.start + \
 65	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
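
/*
 * Illustration only: the offsets above are a two-dimensional index into the
 * semaphore page, gen8_semaphore_seqno_size (8) bytes per (from, to) pair.
 * The stand-alone sketch below reproduces the GEN8_SEMAPHORE_OFFSET
 * arithmetic so it can be compared against the "GEN8 signal/wait table"
 * comment further down; the engine ids are assumed to be the values of
 * enum intel_engine_id defined later in this file.
 */
#include <stdio.h>

#define NUM_ENGINES 5	/* mirrors I915_NUM_ENGINES */
#define SEQNO_SIZE  8	/* mirrors gen8_semaphore_seqno_size */

static unsigned int semaphore_offset(unsigned int from, unsigned int to)
{
	return (from * NUM_ENGINES + to) * SEQNO_SIZE;
}

int main(void)
{
	unsigned int from, to;

	for (from = 0; from < NUM_ENGINES; from++) {
		for (to = 0; to < NUM_ENGINES; to++)
			printf("0x%02x ", semaphore_offset(from, to));
		printf("\n");
	}
	return 0;
}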
 66
 67enum intel_engine_hangcheck_action {
 68	HANGCHECK_IDLE = 0,
 69	HANGCHECK_WAIT,
 70	HANGCHECK_ACTIVE,
 71	HANGCHECK_KICK,
 72	HANGCHECK_HUNG,
 73};
 74
 75#define HANGCHECK_SCORE_RING_HUNG 31
 76
 77#define I915_MAX_SLICES	3
 78#define I915_MAX_SUBSLICES 3
 79
 80#define instdone_slice_mask(dev_priv__) \
 81	(INTEL_GEN(dev_priv__) == 7 ? \
 82	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
 83
 84#define instdone_subslice_mask(dev_priv__) \
 85	(INTEL_GEN(dev_priv__) == 7 ? \
 86	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)
 87
 88#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
 89	for ((slice__) = 0, (subslice__) = 0; \
 90	     (slice__) < I915_MAX_SLICES; \
 91	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
 92	       (slice__) += ((subslice__) == 0)) \
 93		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
 94			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
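
/*
 * Illustration only: the iterator above visits every (slice, subslice) pair
 * but only executes its body for pairs present in both masks. The plain
 * nested loops below mirror that behaviour, which makes the macro's
 * increment trick (the subslice counter rolls over and the slice advances on
 * the roll-over) easier to follow. The mask values are made-up examples.
 */
#include <stdio.h>

#define MAX_SLICES	3	/* mirrors I915_MAX_SLICES */
#define MAX_SUBSLICES	3	/* mirrors I915_MAX_SUBSLICES */

int main(void)
{
	unsigned int slice_mask = 0x1;		/* e.g. one slice fused in */
	unsigned int subslice_mask = 0x3;	/* e.g. two subslices fused in */
	int slice, subslice;

	for (slice = 0; slice < MAX_SLICES; slice++)
		for (subslice = 0; subslice < MAX_SUBSLICES; subslice++)
			if ((slice_mask & (1u << slice)) &&
			    (subslice_mask & (1u << subslice)))
				printf("slice %d, subslice %d\n",
				       slice, subslice);
	return 0;
}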
 95
 96struct intel_instdone {
 97	u32 instdone;
 98	/* The following exist only in the RCS engine */
 99	u32 slice_common;
100	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
101	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
102};
103
104struct intel_engine_hangcheck {
105	u64 acthd;
106	u32 seqno;
107	int score;
108	enum intel_engine_hangcheck_action action;
109	int deadlock;
110	struct intel_instdone instdone;
111};
112
113struct intel_ring {
114	struct i915_vma *vma;
115	void *vaddr;
116
117	struct intel_engine_cs *engine;
118
119	struct list_head request_list;
120
121	u32 head;
122	u32 tail;
123	int space;
124	int size;
125	int effective_size;
126
127	/** We track the position of the requests in the ring buffer, and
128	 * when each is retired we increment last_retired_head as the GPU
129	 * must have finished processing the request and so we know we
130	 * can advance the ringbuffer up to that position.
131	 *
132	 * last_retired_head is set to -1 after the value is consumed so
133	 * we can detect new retirements.
134	 */
135	u32 last_retired_head;
136};
137
138struct i915_gem_context;
139struct drm_i915_reg_table;
140
141/*
142 * we use a single page to load ctx workarounds so all of these
143 * values are referred in terms of dwords
144 *
145 * struct i915_wa_ctx_bb:
146 *  offset: specifies batch starting position, also helpful in case
147 *    if we want to have multiple batches at different offsets based on
148 *    some criteria. It is not a requirement at the moment but provides
149 *    an option for future use.
150 *  size: size of the batch in DWORDS
151 */
152struct i915_ctx_workarounds {
153	struct i915_wa_ctx_bb {
154		u32 offset;
155		u32 size;
156	} indirect_ctx, per_ctx;
157	struct i915_vma *vma;
158};
159
160struct drm_i915_gem_request;
161struct intel_render_state;
162
163struct intel_engine_cs {
164	struct drm_i915_private *i915;
165	const char	*name;
166	enum intel_engine_id {
167		RCS = 0,
168		BCS,
169		VCS,
170		VCS2,	/* Keep instances of the same type engine together. */
171		VECS
172	} id;
173#define _VCS(n) (VCS + (n))
174	unsigned int exec_id;
175	enum intel_engine_hw_id {
176		RCS_HW = 0,
177		VCS_HW,
178		BCS_HW,
179		VECS_HW,
180		VCS2_HW
181	} hw_id;
182	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
183	u32		mmio_base;
184	unsigned int irq_shift;
185	struct intel_ring *buffer;
186	struct intel_timeline *timeline;
187
188	struct intel_render_state *render_state;
189
190	/* Rather than have every client wait upon all user interrupts,
191	 * with the herd waking after every interrupt and each doing the
192	 * heavyweight seqno dance, we delegate the task (of being the
193	 * bottom-half of the user interrupt) to the first client. After
194	 * every interrupt, we wake up one client, who does the heavyweight
195	 * coherent seqno read and either goes back to sleep (if incomplete),
196	 * or wakes up all the completed clients in parallel, before then
197	 * transferring the bottom-half status to the next client in the queue.
198	 *
199	 * Compared to walking the entire list of waiters in a single dedicated
200	 * bottom-half, we reduce the latency of the first waiter by avoiding
201	 * a context switch, but incur additional coherent seqno reads when
202	 * following the chain of request breadcrumbs. Since it is most likely
203	 * that we have a single client waiting on each seqno, then reducing
204	 * the overhead of waking that client is much preferred.
205	 */
206	struct intel_breadcrumbs {
207		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
208		bool irq_posted;
209
210		spinlock_t lock; /* protects the lists of requests; irqsafe */
211		struct rb_root waiters; /* sorted by retirement, priority */
212		struct rb_root signals; /* sorted by retirement */
213		struct intel_wait *first_wait; /* oldest waiter by retirement */
214		struct task_struct *signaler; /* used for fence signalling */
215		struct drm_i915_gem_request *first_signal;
216		struct timer_list fake_irq; /* used after a missed interrupt */
217		struct timer_list hangcheck; /* detect missed interrupts */
218
219		unsigned long timeout;
220
221		bool irq_enabled : 1;
222		bool rpm_wakelock : 1;
223	} breadcrumbs;
224
225	/*
226	 * A pool of objects to use as shadow copies of client batch buffers
227	 * when the command parser is enabled. Prevents the client from
228	 * modifying the batch contents after software parsing.
229	 */
230	struct i915_gem_batch_pool batch_pool;
231
232	struct intel_hw_status_page status_page;
233	struct i915_ctx_workarounds wa_ctx;
234	struct i915_vma *scratch;
235
236	u32             irq_keep_mask; /* always keep these interrupts */
237	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
238	void		(*irq_enable)(struct intel_engine_cs *engine);
239	void		(*irq_disable)(struct intel_engine_cs *engine);
240
241	int		(*init_hw)(struct intel_engine_cs *engine);
242	void		(*reset_hw)(struct intel_engine_cs *engine,
243				    struct drm_i915_gem_request *req);
244
245	int		(*init_context)(struct drm_i915_gem_request *req);
246
247	int		(*emit_flush)(struct drm_i915_gem_request *request,
248				      u32 mode);
249#define EMIT_INVALIDATE	BIT(0)
250#define EMIT_FLUSH	BIT(1)
251#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
252	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
253					 u64 offset, u32 length,
254					 unsigned int dispatch_flags);
255#define I915_DISPATCH_SECURE BIT(0)
256#define I915_DISPATCH_PINNED BIT(1)
257#define I915_DISPATCH_RS     BIT(2)
258	void		(*emit_breadcrumb)(struct drm_i915_gem_request *req,
259					   u32 *out);
260	int		emit_breadcrumb_sz;
261
262	/* Pass the request to the hardware queue (e.g. directly into
263	 * the legacy ringbuffer or to the end of an execlist).
264	 *
265	 * This is called from an atomic context with irqs disabled; must
266	 * be irq safe.
267	 */
268	void		(*submit_request)(struct drm_i915_gem_request *req);
269
270	/* Call when the priority on a request has changed and it and its
271	 * dependencies may need rescheduling. Note the request itself may
272	 * not be ready to run!
273	 *
274	 * Called under the struct_mutex.
275	 */
276	void		(*schedule)(struct drm_i915_gem_request *request,
277				    int priority);
278
279	/* Some chipsets are not quite as coherent as advertised and need
280	 * an expensive kick to force a true read of the up-to-date seqno.
281	 * However, the up-to-date seqno is not always required and the last
282	 * seen value is good enough. Note that the seqno will always be
283	 * monotonic, even if not coherent.
284	 */
285	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
286	void		(*cleanup)(struct intel_engine_cs *engine);
287
288	/* GEN8 signal/wait table - never trust comments!
289	 *	  signal to	signal to    signal to   signal to      signal to
290	 *	    RCS		   VCS          BCS        VECS		 VCS2
291	 *      --------------------------------------------------------------------
292	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
293	 *	|-------------------------------------------------------------------
294	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
295	 *	|-------------------------------------------------------------------
296	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
297	 *	|-------------------------------------------------------------------
298	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
299	 *	|-------------------------------------------------------------------
300	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
301	 *	|-------------------------------------------------------------------
302	 *
303	 * Generalization:
304	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
305	 *  ie. transpose of g(x, y)
306	 *
307	 *	 sync from	sync from    sync from    sync from	sync from
308	 *	    RCS		   VCS          BCS        VECS		 VCS2
309	 *      --------------------------------------------------------------------
310	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
311	 *	|-------------------------------------------------------------------
312	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
313	 *	|-------------------------------------------------------------------
314	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
315	 *	|-------------------------------------------------------------------
316	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
317	 *	|-------------------------------------------------------------------
318	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
319	 *	|-------------------------------------------------------------------
320	 *
321	 * Generalization:
322	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
323	 *  ie. transpose of f(x, y)
324	 */
325	struct {
326		union {
327#define GEN6_SEMAPHORE_LAST	VECS_HW
328#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
329#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
330			struct {
331				/* our mbox written by others */
332				u32		wait[GEN6_NUM_SEMAPHORES];
333				/* mboxes this ring signals to */
334				i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
335			} mbox;
336			u64		signal_ggtt[I915_NUM_ENGINES];
337		};
338
339		/* AKA wait() */
340		int	(*sync_to)(struct drm_i915_gem_request *req,
341				   struct drm_i915_gem_request *signal);
342		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *out);
343	} semaphore;
344
345	/* Execlists */
346	struct tasklet_struct irq_tasklet;
347	struct execlist_port {
348		struct drm_i915_gem_request *request;
349		unsigned int count;
350	} execlist_port[2];
351	struct rb_root execlist_queue;
352	struct rb_node *execlist_first;
353	unsigned int fw_domains;
354	bool disable_lite_restore_wa;
355	bool preempt_wa;
356	u32 ctx_desc_template;
357
358	struct i915_gem_context *last_context;
359
360	struct intel_engine_hangcheck hangcheck;
361
362	bool needs_cmd_parser;
363
364	/*
365	 * Table of commands the command parser needs to know about
366	 * for this engine.
367	 */
368	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
369
370	/*
371	 * Table of registers allowed in commands that read/write registers.
372	 */
373	const struct drm_i915_reg_table *reg_tables;
374	int reg_table_count;
375
376	/*
377	 * Returns the bitmask for the length field of the specified command.
378	 * Return 0 for an unrecognized/invalid command.
379	 *
380	 * If the command parser finds an entry for a command in the engine's
381	 * cmd_tables, it gets the command's length based on the table entry.
382	 * If not, it calls this function to determine the per-engine length
383	 * field encoding for the command (i.e. different opcode ranges use
384	 * certain bits to encode the command length in the header).
385	 */
386	u32 (*get_cmd_length_mask)(u32 cmd_header);
387};
388
389static inline unsigned
390intel_engine_flag(const struct intel_engine_cs *engine)
391{
392	return 1 << engine->id;
393}
394
395static inline void
396intel_flush_status_page(struct intel_engine_cs *engine, int reg)
397{
398	mb();
399	clflush(&engine->status_page.page_addr[reg]);
400	mb();
401}
402
403static inline u32
404intel_read_status_page(struct intel_engine_cs *engine, int reg)
405{
406	/* Ensure that the compiler doesn't optimize away the load. */
407	return READ_ONCE(engine->status_page.page_addr[reg]);
408}
409
410static inline void
411intel_write_status_page(struct intel_engine_cs *engine,
412			int reg, u32 value)
413{
414	engine->status_page.page_addr[reg] = value;
415}
416
417/*
418 * Reads a dword out of the status page, which is written to from the command
419 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
420 * MI_STORE_DATA_IMM.
421 *
422 * The following dwords have a reserved meaning:
423 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
424 * 0x04: ring 0 head pointer
425 * 0x05: ring 1 head pointer (915-class)
426 * 0x06: ring 2 head pointer (915-class)
427 * 0x10-0x1b: Context status DWords (GM45)
428 * 0x1f: Last written status offset. (GM45)
429 * 0x20-0x2f: Reserved (Gen6+)
430 *
431 * The area from dword 0x30 to 0x3ff is available for driver usage.
432 */
433#define I915_GEM_HWS_INDEX		0x30
434#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
435#define I915_GEM_HWS_SCRATCH_INDEX	0x40
436#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
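
/*
 * Illustration only: the *_ADDR variants above turn a dword index into the
 * byte offset that MI_STORE_DWORD_INDEX expects. Assuming the usual
 * MI_STORE_DWORD_INDEX_SHIFT of 2 (dword index to byte offset), the sketch
 * below prints the resulting offsets for the two indices defined above.
 */
#include <stdio.h>

#define HWS_INDEX		0x30
#define HWS_SCRATCH_INDEX	0x40
#define STORE_DWORD_INDEX_SHIFT	2	/* assumed MI_STORE_DWORD_INDEX_SHIFT */

int main(void)
{
	printf("seqno slot:   dword 0x%02x -> byte 0x%03x\n",
	       HWS_INDEX, HWS_INDEX << STORE_DWORD_INDEX_SHIFT);
	printf("scratch slot: dword 0x%02x -> byte 0x%03x\n",
	       HWS_SCRATCH_INDEX, HWS_SCRATCH_INDEX << STORE_DWORD_INDEX_SHIFT);
	return 0;
}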
437
438struct intel_ring *
439intel_engine_create_ring(struct intel_engine_cs *engine, int size);
440int intel_ring_pin(struct intel_ring *ring);
441void intel_ring_unpin(struct intel_ring *ring);
442void intel_ring_free(struct intel_ring *ring);
443
444void intel_engine_stop(struct intel_engine_cs *engine);
445void intel_engine_cleanup(struct intel_engine_cs *engine);
446
447void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
448
449int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
450
451int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
452int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
453
454static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
455{
456	*(uint32_t *)(ring->vaddr + ring->tail) = data;
457	ring->tail += 4;
458}
459
460static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
461{
462	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
463}
464
465static inline void intel_ring_advance(struct intel_ring *ring)
466{
467	/* Dummy function.
468	 *
469	 * This serves as a placeholder in the code so that the reader
470	 * can compare against the preceding intel_ring_begin() and
471	 * check that the number of dwords emitted matches the space
472	 * reserved for the command packet (i.e. the value passed to
473	 * intel_ring_begin()).
474	 */
475}
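
/*
 * Illustration only: a tiny self-contained version of the check the comment
 * above asks the reader to make by eye. The number of intel_ring_emit()
 * calls between intel_ring_begin() and intel_ring_advance() must equal the
 * dword count passed to intel_ring_begin(); here the "advance" step asserts
 * it instead of being a no-op.
 */
#include <stdint.h>
#include <assert.h>

static int reserved_dwords;
static int emitted_dwords;
static uint32_t ring_vaddr[1024];
static uint32_t ring_tail;

static void example_begin(int n)
{
	reserved_dwords = n;
	emitted_dwords = 0;
}

static void example_emit(uint32_t data)
{
	ring_vaddr[ring_tail / 4] = data;	/* *(u32 *)(vaddr + tail) = data */
	ring_tail += 4;
	emitted_dwords++;
}

static void example_advance(void)
{
	assert(emitted_dwords == reserved_dwords);
}

int main(void)
{
	example_begin(2);
	example_emit(0);	/* e.g. MI_NOOP */
	example_emit(0);
	example_advance();	/* passes: 2 dwords reserved, 2 emitted */
	return 0;
}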
476
477static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
478{
479	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
480	u32 offset = addr - ring->vaddr;
481	return offset & (ring->size - 1);
482}
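
/*
 * Illustration only: ring sizes are powers of two, so the masking above folds
 * an address one past the end of the ring back to offset 0, which is exactly
 * the wrap the comment about not writing ring->size relies on. A
 * self-contained demonstration:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t size = 4096;		/* power-of-two ring size */
	char vaddr[4096];
	char *end = vaddr + size;	/* one byte past the end of the ring */

	uint32_t offset = (uint32_t)(end - vaddr) & (size - 1);

	printf("offset of vaddr + size: %u (wraps back to the start)\n", offset);
	return 0;
}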
483
484int __intel_ring_space(int head, int tail, int size);
485void intel_ring_update_space(struct intel_ring *ring);
486
487void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
488
489void intel_engine_setup_common(struct intel_engine_cs *engine);
490int intel_engine_init_common(struct intel_engine_cs *engine);
491int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
492void intel_engine_cleanup_common(struct intel_engine_cs *engine);
493
494int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
495int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
496int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
497int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
498int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
499
500u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
501u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
502
503static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
504{
505	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
506}
507
508static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
509{
510	/* We are only peeking at the tail of the submit queue (and not the
511	 * queue itself) in order to gain a hint as to the current active
512	 * state of the engine. Callers are not expected to be taking
513	 * engine->timeline->lock, nor are they expected to be concerned
514	 * with serialising this hint with anything, so document it as
515	 * a hint and nothing more.
516	 */
517	return READ_ONCE(engine->timeline->last_submitted_seqno);
518}
519
520int init_workarounds_ring(struct intel_engine_cs *engine);
521
522void intel_engine_get_instdone(struct intel_engine_cs *engine,
523			       struct intel_instdone *instdone);
524
525/*
526 * Arbitrary size for largest possible 'add request' sequence. The code paths
527 * are complex and variable. Empirical measurement shows that the worst case
528 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
529 * we need to allocate double the largest single packet within that emission
530 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
531 */
532#define MIN_SPACE_FOR_ADD_REQUEST 336
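
/*
 * Illustration only: the 336 above is simply the Broadwell worst case from
 * the comment converted to bytes, (6 + 6 + 72) dwords * 4 bytes per dword.
 * A one-line check:
 */
#include <stdio.h>

int main(void)
{
	printf("MIN_SPACE_FOR_ADD_REQUEST = %d bytes\n", (6 + 6 + 72) * 4);
	return 0;
}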
533
534static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
535{
536	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
537}
538
539/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
540int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
541
542static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
543{
544	wait->tsk = current;
545	wait->seqno = seqno;
546}
547
548static inline bool intel_wait_complete(const struct intel_wait *wait)
549{
550	return RB_EMPTY_NODE(&wait->node);
551}
552
553bool intel_engine_add_wait(struct intel_engine_cs *engine,
554			   struct intel_wait *wait);
555void intel_engine_remove_wait(struct intel_engine_cs *engine,
556			      struct intel_wait *wait);
557void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
558
559static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
560{
561	return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
562}
563
564static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
565{
566	bool wakeup = false;
567
568	/* Note that for this not to dangerously chase a dangling pointer,
569	 * we must hold the rcu_read_lock here.
570	 *
571	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
572	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
573	 * is unlikely to be beneficial.
574	 */
575	if (intel_engine_has_waiter(engine)) {
576		struct task_struct *tsk;
577
578		rcu_read_lock();
579		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
580		if (tsk)
581			wakeup = wake_up_process(tsk);
582		rcu_read_unlock();
583	}
584
585	return wakeup;
586}
587
588void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
589void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
590unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
591
592#endif /* _INTEL_RINGBUFFER_H_ */