v5.14.15
  1/*
  2 * Copyright © 2008-2018 Intel Corporation
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 21 * IN THE SOFTWARE.
 22 *
 23 */
 24
 25#ifndef I915_REQUEST_H
 26#define I915_REQUEST_H
 27
 28#include <linux/dma-fence.h>
 29#include <linux/hrtimer.h>
 30#include <linux/irq_work.h>
 31#include <linux/llist.h>
 32#include <linux/lockdep.h>
 33
 34#include "gem/i915_gem_context_types.h"
 35#include "gt/intel_context_types.h"
 36#include "gt/intel_engine_types.h"
 37#include "gt/intel_timeline_types.h"
 38
 39#include "i915_gem.h"
 40#include "i915_scheduler.h"
 41#include "i915_selftest.h"
 42#include "i915_sw_fence.h"
 43
 44#include <uapi/drm/i915_drm.h>
 45
 46struct drm_file;
 47struct drm_i915_gem_object;
 48struct drm_printer;
 49struct i915_request;
 50
 51struct i915_capture_list {
 52	struct i915_capture_list *next;
 53	struct i915_vma *vma;
 54};
 55
 56#define RQ_TRACE(rq, fmt, ...) do {					\
 57	const struct i915_request *rq__ = (rq);				\
 58	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
 59		     rq__->fence.context, rq__->fence.seqno,		\
 60		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
 61} while (0)
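/*
 * Illustrative note (not part of the upstream header): RQ_TRACE() is a thin
 * wrapper around ENGINE_TRACE() that prefixes the message with the request's
 * fence id and the breadcrumb currently visible in the HWSP. A hedged usage
 * sketch, assuming a valid struct i915_request *rq:
 *
 *	RQ_TRACE(rq, "submitted, head %x tail %x\n", rq->head, rq->tail);
 */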
 62
 63enum {
 64	/*
 65	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
 66	 *
 67	 * Set by __i915_request_submit() on handing over to HW, and cleared
 68	 * by __i915_request_unsubmit() if we preempt this request.
 69	 *
 70	 * Finally cleared for consistency on retiring the request, when
 71	 * we know the HW is no longer running this request.
 72	 *
 73	 * See i915_request_is_active()
 74	 */
 75	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
 76
 77	/*
 78	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
 79	 *
 80	 * Using the scheduler, when a request is ready for execution it is put
 81	 * into the priority queue, and removed from that queue when transferred
 82	 * to the HW runlists. We want to track its membership within the
 83	 * priority queue so that we can easily check before rescheduling.
 84	 *
 85	 * See i915_request_in_priority_queue()
 86	 */
 87	I915_FENCE_FLAG_PQUEUE,
 88
 89	/*
 90	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
 91	 *
 92	 * This request has been suspended, pending an ongoing investigation.
 93	 */
 94	I915_FENCE_FLAG_HOLD,
 95
 96	/*
 97	 * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
 98	 * breadcrumb that marks the end of semaphore waits and start of the
 99	 * user payload.
100	 */
101	I915_FENCE_FLAG_INITIAL_BREADCRUMB,
102
103	/*
104	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
105	 *
106	 * Internal bookkeeping used by the breadcrumb code to track when
107	 * a request is on the various signal_list.
108	 */
109	I915_FENCE_FLAG_SIGNAL,
110
111	/*
112	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
113	 *
114	 * The execution of some requests should not be interrupted. This is
115	 * a sensitive operation as it makes the request super important,
116	 * blocking other higher priority work. Abuse of this flag will
117	 * lead to quality of service issues.
118	 */
119	I915_FENCE_FLAG_NOPREEMPT,
120
121	/*
122	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
123	 *
124	 * A high priority sentinel request may be submitted to clear the
125	 * submission queue. As it will be the only request in-flight, upon
126	 * execution all other active requests will have been preempted and
127	 * unsubmitted. This preemptive pulse is used to re-evaluate the
128	 * in-flight requests, particularly in cases where an active context
129	 * is banned and those active requests need to be cancelled.
130	 */
131	I915_FENCE_FLAG_SENTINEL,
132
133	/*
134	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
135	 *
136	 * Some requests are more important than others! In particular, a
137	 * request that the user is waiting on is typically required for
 138	 * interactive latency, which we want to minimise by upclocking
139	 * the GPU. Here we track such boost requests on a per-request basis.
140	 */
141	I915_FENCE_FLAG_BOOST,
142};
143
144/**
145 * Request queue structure.
146 *
147 * The request queue allows us to note sequence numbers that have been emitted
148 * and may be associated with active buffers to be retired.
149 *
150 * By keeping this list, we can avoid having to do questionable sequence
151 * number comparisons on buffer last_read|write_seqno. It also allows an
152 * emission time to be associated with the request for tracking how far ahead
153 * of the GPU the submission is.
154 *
155 * When modifying this structure be very aware that we perform a lockless
156 * RCU lookup of it that may race against reallocation of the struct
157 * from the slab freelist. We intentionally do not zero the structure on
158 * allocation so that the lookup can use the dangling pointers (and is
 159 * cognisant that those pointers may be wrong). Instead, everything that
160 * needs to be initialised must be done so explicitly.
161 *
162 * The requests are reference counted.
163 */
164struct i915_request {
165	struct dma_fence fence;
166	spinlock_t lock;
167
168	/**
169	 * Context and ring buffer related to this request
170	 * Contexts are refcounted, so when this request is associated with a
171	 * context, we must increment the context's refcount, to guarantee that
172	 * it persists while any request is linked to it. Requests themselves
173	 * are also refcounted, so the request will only be freed when the last
174	 * reference to it is dismissed, and the code in
175	 * i915_request_free() will then decrement the refcount on the
176	 * context.
177	 */
178	struct intel_engine_cs *engine;
179	struct intel_context *context;
180	struct intel_ring *ring;
181	struct intel_timeline __rcu *timeline;
182
183	struct list_head signal_link;
184	struct llist_node signal_node;
185
186	/*
187	 * The rcu epoch of when this request was allocated. Used to judiciously
188	 * apply backpressure on future allocations to ensure that under
 189	 * memory pressure there are sufficient RCU ticks for us to reclaim our
190	 * RCU protected slabs.
191	 */
192	unsigned long rcustate;
193
194	/*
195	 * We pin the timeline->mutex while constructing the request to
196	 * ensure that no caller accidentally drops it during construction.
197	 * The timeline->mutex must be held to ensure that only this caller
198	 * can use the ring and manipulate the associated timeline during
199	 * construction.
200	 */
201	struct pin_cookie cookie;
202
203	/*
204	 * Fences for the various phases in the request's lifetime.
205	 *
206	 * The submit fence is used to await upon all of the request's
207	 * dependencies. When it is signaled, the request is ready to run.
208	 * It is used by the driver to then queue the request for execution.
209	 */
210	struct i915_sw_fence submit;
211	union {
212		wait_queue_entry_t submitq;
213		struct i915_sw_dma_fence_cb dmaq;
214		struct i915_request_duration_cb {
215			struct dma_fence_cb cb;
216			ktime_t emitted;
217		} duration;
218	};
219	struct llist_head execute_cb;
220	struct i915_sw_fence semaphore;
221
222	/*
223	 * A list of everyone we wait upon, and everyone who waits upon us.
224	 * Even though we will not be submitted to the hardware before the
225	 * submit fence is signaled (it waits for all external events as well
226	 * as our own requests), the scheduler still needs to know the
227	 * dependency tree for the lifetime of the request (from execbuf
228	 * to retirement), i.e. bidirectional dependency information for the
229	 * request not tied to individual fences.
230	 */
231	struct i915_sched_node sched;
232	struct i915_dependency dep;
233	intel_engine_mask_t execution_mask;
234
235	/*
236	 * A convenience pointer to the current breadcrumb value stored in
237	 * the HW status page (or our timeline's local equivalent). The full
238	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
239	 */
240	const u32 *hwsp_seqno;
241
242	/** Position in the ring of the start of the request */
243	u32 head;
244
245	/** Position in the ring of the start of the user packets */
246	u32 infix;
247
248	/**
249	 * Position in the ring of the start of the postfix.
250	 * This is required to calculate the maximum available ring space
251	 * without overwriting the postfix.
252	 */
253	u32 postfix;
254
255	/** Position in the ring of the end of the whole request */
256	u32 tail;
257
258	/** Position in the ring of the end of any workarounds after the tail */
259	u32 wa_tail;
260
 261	/** Preallocate space in the ring for emitting the request */
262	u32 reserved_space;
263
264	/** Batch buffer related to this request if any (used for
265	 * error state dump only).
266	 */
267	struct i915_vma *batch;
268	/**
269	 * Additional buffers requested by userspace to be captured upon
270	 * a GPU hang. The vma/obj on this list are protected by their
271	 * active reference - all objects on this list must also be
272	 * on the active_list (of their final request).
273	 */
274	struct i915_capture_list *capture_list;
275
276	/** Time at which this request was emitted, in jiffies. */
277	unsigned long emitted_jiffies;
278
279	/** timeline->request entry for this request */
280	struct list_head link;
281
282	/** Watchdog support fields. */
283	struct i915_request_watchdog {
284		struct llist_node link;
285		struct hrtimer timer;
286	} watchdog;
287
288	I915_SELFTEST_DECLARE(struct {
289		struct list_head link;
290		unsigned long delay;
291	} mock;)
292};
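/*
 * Illustrative sketch (not part of the upstream header) of the allocation
 * pattern implied by the comment above struct i915_request: the slab cache is
 * assumed to be created with SLAB_TYPESAFE_BY_RCU, so memory is never handed
 * back to the page allocator while an RCU reader may still be chasing a stale
 * pointer, and every field such a lookup may inspect must be reinitialised
 * explicitly on each allocation (no zeroing). The cache, field and function
 * names below are hypothetical; <linux/slab.h> is assumed to be available.
 */
#if 0
static struct kmem_cache *example_request_cache;

static int example_cache_init(void)
{
	example_request_cache =
		kmem_cache_create("example_requests",
				  sizeof(struct i915_request), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU,
				  NULL);
	return example_request_cache ? 0 : -ENOMEM;
}

static struct i915_request *example_request_alloc(gfp_t gfp)
{
	struct i915_request *rq;

	rq = kmem_cache_alloc(example_request_cache, gfp);
	if (!rq)
		return NULL;

	/* Not zeroed on allocation: reinitialise everything readers may use. */
	spin_lock_init(&rq->lock);
	rq->capture_list = NULL;

	return rq;
}
#endif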
293
294#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
295
296extern const struct dma_fence_ops i915_fence_ops;
297
298static inline bool dma_fence_is_i915(const struct dma_fence *fence)
299{
300	return fence->ops == &i915_fence_ops;
301}
302
303struct kmem_cache *i915_request_slab_cache(void);
304
305struct i915_request * __must_check
306__i915_request_create(struct intel_context *ce, gfp_t gfp);
307struct i915_request * __must_check
308i915_request_create(struct intel_context *ce);
309
310void __i915_request_skip(struct i915_request *rq);
311bool i915_request_set_error_once(struct i915_request *rq, int error);
312struct i915_request *i915_request_mark_eio(struct i915_request *rq);
313
314struct i915_request *__i915_request_commit(struct i915_request *request);
315void __i915_request_queue(struct i915_request *rq,
316			  const struct i915_sched_attr *attr);
317void __i915_request_queue_bh(struct i915_request *rq);
318
319bool i915_request_retire(struct i915_request *rq);
320void i915_request_retire_upto(struct i915_request *rq);
321
322static inline struct i915_request *
323to_request(struct dma_fence *fence)
324{
325	/* We assume that NULL fence/request are interoperable */
326	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
327	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
328	return container_of(fence, struct i915_request, fence);
329}
330
331static inline struct i915_request *
332i915_request_get(struct i915_request *rq)
333{
334	return to_request(dma_fence_get(&rq->fence));
335}
336
337static inline struct i915_request *
338i915_request_get_rcu(struct i915_request *rq)
339{
340	return to_request(dma_fence_get_rcu(&rq->fence));
341}
342
343static inline void
344i915_request_put(struct i915_request *rq)
345{
346	dma_fence_put(&rq->fence);
347}
348
349int i915_request_await_object(struct i915_request *to,
350			      struct drm_i915_gem_object *obj,
351			      bool write);
352int i915_request_await_dma_fence(struct i915_request *rq,
353				 struct dma_fence *fence);
354int i915_request_await_execution(struct i915_request *rq,
355				 struct dma_fence *fence,
356				 void (*hook)(struct i915_request *rq,
357					      struct dma_fence *signal));
358
359void i915_request_add(struct i915_request *rq);
360
361bool __i915_request_submit(struct i915_request *request);
362void i915_request_submit(struct i915_request *request);
363
364void __i915_request_unsubmit(struct i915_request *request);
365void i915_request_unsubmit(struct i915_request *request);
366
367void i915_request_cancel(struct i915_request *rq, int error);
368
369long i915_request_wait(struct i915_request *rq,
370		       unsigned int flags,
371		       long timeout)
372	__attribute__((nonnull(1)));
373#define I915_WAIT_INTERRUPTIBLE	BIT(0)
374#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
375#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
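/*
 * Illustrative sketch (not part of the upstream header): i915_request_wait()
 * returns the remaining timeout on success or a negative error code, so a
 * caller that only cares about success typically clamps the result. The
 * helper name is hypothetical; MAX_SCHEDULE_TIMEOUT comes from <linux/sched.h>.
 */
#if 0
static int example_wait_for_request(struct i915_request *rq)
{
	long ret;

	rq = i915_request_get(rq);	/* keep the request alive across the wait */
	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);

	return ret < 0 ? ret : 0;
}
#endif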
376
377void i915_request_show(struct drm_printer *m,
378		       const struct i915_request *rq,
379		       const char *prefix,
380		       int indent);
381
382static inline bool i915_request_signaled(const struct i915_request *rq)
383{
384	/* The request may live longer than its HWSP, so check flags first! */
385	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
386}
387
388static inline bool i915_request_is_active(const struct i915_request *rq)
389{
390	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
391}
392
393static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
394{
395	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
396}
397
398static inline bool
399i915_request_has_initial_breadcrumb(const struct i915_request *rq)
400{
401	return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
402}
403
404/**
405 * Returns true if seq1 is later than seq2.
406 */
407static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
408{
409	return (s32)(seq1 - seq2) >= 0;
410}
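/*
 * Illustrative sketch (not part of the upstream header): the signed
 * subtraction above makes the comparison tolerant of u32 wraparound, so a
 * seqno just after the wrap point still compares as "later" than one just
 * before it. The helper name is hypothetical.
 */
#if 0
static inline bool example_seqno_wrap(void)
{
	const u32 before_wrap = 0xfffffffeu;
	const u32 after_wrap = 0x2u;	/* 4 increments later, post wrap */

	return i915_seqno_passed(after_wrap, before_wrap) &&
	       !i915_seqno_passed(before_wrap, after_wrap);
}
#endif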
411
412static inline u32 __hwsp_seqno(const struct i915_request *rq)
413{
414	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
415
416	return READ_ONCE(*hwsp);
417}
418
419/**
420 * hwsp_seqno - the current breadcrumb value in the HW status page
421 * @rq: the request, to chase the relevant HW status page
422 *
423 * The emphasis in naming here is that hwsp_seqno() is not a property of the
424 * request, but an indication of the current HW state (associated with this
425 * request). Its value will change as the GPU executes more requests.
426 *
427 * Returns the current breadcrumb value in the associated HW status page (or
428 * the local timeline's equivalent) for this request. The request itself
429 * has the associated breadcrumb value of rq->fence.seqno, when the HW
430 * status page has that breadcrumb or later, this request is complete.
431 */
432static inline u32 hwsp_seqno(const struct i915_request *rq)
433{
434	u32 seqno;
435
436	rcu_read_lock(); /* the HWSP may be freed at runtime */
437	seqno = __hwsp_seqno(rq);
438	rcu_read_unlock();
439
440	return seqno;
441}
442
443static inline bool __i915_request_has_started(const struct i915_request *rq)
444{
445	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
446}
447
448/**
449 * i915_request_started - check if the request has begun being executed
450 * @rq: the request
451 *
452 * If the timeline is not using initial breadcrumbs, a request is
453 * considered started if the previous request on its timeline (i.e.
454 * context) has been signaled.
455 *
456 * If the timeline is using semaphores, it will also be emitting an
457 * "initial breadcrumb" after the semaphores are complete and just before
458 * it began executing the user payload. A request can therefore be active
459 * on the HW and not yet started as it is still busywaiting on its
460 * dependencies (via HW semaphores).
461 *
462 * If the request has started, its dependencies will have been signaled
463 * (either by fences or by semaphores) and it will have begun processing
464 * the user payload.
465 *
466 * However, even if a request has started, it may have been preempted and
467 * so no longer active, or it may have already completed.
468 *
469 * See also i915_request_is_active().
470 *
471 * Returns true if the request has begun executing the user payload, or
 472 * has completed.
473 */
474static inline bool i915_request_started(const struct i915_request *rq)
475{
476	bool result;
477
478	if (i915_request_signaled(rq))
479		return true;
480
481	result = true;
482	rcu_read_lock(); /* the HWSP may be freed at runtime */
483	if (likely(!i915_request_signaled(rq)))
484		/* Remember: started but may have since been preempted! */
485		result = __i915_request_has_started(rq);
486	rcu_read_unlock();
487
488	return result;
489}
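/*
 * Illustrative note (not part of the upstream header): combining the helpers
 * above, a request may be active (submitted to HW) and yet not "started",
 * because it is still busywaiting on its semaphores ahead of the initial
 * breadcrumb. A hedged sketch, with a hypothetical stall counter:
 *
 *	if (i915_request_is_active(rq) && !i915_request_started(rq))
 *		semaphore_stalls++;
 */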
490
491/**
492 * i915_request_is_running - check if the request may actually be executing
493 * @rq: the request
494 *
 495 * Returns true if the request is currently submitted to hardware and has passed
 496 * its start point (i.e. the context is set up and is not busywaiting). Note that
497 * it may no longer be running by the time the function returns!
498 */
499static inline bool i915_request_is_running(const struct i915_request *rq)
500{
501	bool result;
502
503	if (!i915_request_is_active(rq))
504		return false;
505
506	rcu_read_lock();
507	result = __i915_request_has_started(rq) && i915_request_is_active(rq);
508	rcu_read_unlock();
509
510	return result;
511}
512
513/**
514 * i915_request_is_ready - check if the request is ready for execution
515 * @rq: the request
516 *
517 * Upon construction, the request is instructed to wait upon various
518 * signals before it is ready to be executed by the HW. That is, we do
519 * not want to start execution and read data before it is written. In practice,
520 * this is controlled with a mixture of interrupts and semaphores. Once
521 * the submit fence is completed, the backend scheduler will place the
522 * request into its queue and from there submit it for execution. So we
523 * can detect when a request is eligible for execution (and is under control
524 * of the scheduler) by querying where it is in any of the scheduler's lists.
525 *
526 * Returns true if the request is ready for execution (it may be inflight),
527 * false otherwise.
528 */
529static inline bool i915_request_is_ready(const struct i915_request *rq)
530{
531	return !list_empty(&rq->sched.link);
532}
533
534static inline bool __i915_request_is_complete(const struct i915_request *rq)
535{
536	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
537}
538
539static inline bool i915_request_completed(const struct i915_request *rq)
540{
541	bool result;
542
543	if (i915_request_signaled(rq))
544		return true;
545
546	result = true;
547	rcu_read_lock(); /* the HWSP may be freed at runtime */
548	if (likely(!i915_request_signaled(rq)))
549		result = __i915_request_is_complete(rq);
550	rcu_read_unlock();
551
552	return result;
553}
554
555static inline void i915_request_mark_complete(struct i915_request *rq)
556{
557	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
558		   (u32 *)&rq->fence.seqno);
559}
560
561static inline bool i915_request_has_waitboost(const struct i915_request *rq)
562{
563	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
564}
565
566static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
567{
568	/* Preemption should only be disabled very rarely */
569	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
570}
571
572static inline bool i915_request_has_sentinel(const struct i915_request *rq)
573{
574	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
575}
576
577static inline bool i915_request_on_hold(const struct i915_request *rq)
578{
579	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
580}
581
582static inline void i915_request_set_hold(struct i915_request *rq)
583{
584	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
585}
586
587static inline void i915_request_clear_hold(struct i915_request *rq)
588{
589	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
590}
591
592static inline struct intel_timeline *
593i915_request_timeline(const struct i915_request *rq)
594{
595	/* Valid only while the request is being constructed (or retired). */
596	return rcu_dereference_protected(rq->timeline,
597					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
598}
599
600static inline struct i915_gem_context *
601i915_request_gem_context(const struct i915_request *rq)
602{
603	/* Valid only while the request is being constructed (or retired). */
604	return rcu_dereference_protected(rq->context->gem_context, true);
605}
606
607static inline struct intel_timeline *
608i915_request_active_timeline(const struct i915_request *rq)
609{
610	/*
611	 * When in use during submission, we are protected by a guarantee that
612	 * the context/timeline is pinned and must remain pinned until after
613	 * this submission.
614	 */
615	return rcu_dereference_protected(rq->timeline,
616					 lockdep_is_held(&rq->engine->active.lock));
617}
618
619static inline u32
620i915_request_active_seqno(const struct i915_request *rq)
621{
622	u32 hwsp_phys_base =
623		page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
624	u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);
625
626	/*
627	 * Because of wraparound, we cannot simply take tl->hwsp_offset,
 628	 * but instead use the fact that the offset within the page is the same
 629	 * for the vaddr as for hwsp_offset. Take the top bits from tl->hwsp_offset
 630	 * and combine them with the relative offset in rq->hwsp_seqno.
 631	 *
 632	 * As rq->hwsp_seqno is rewritten when signaled, this only works
633	 * when the request isn't signaled yet, but at that point you
634	 * no longer need the offset.
635	 */
636
637	return hwsp_phys_base + hwsp_relative_offset;
638}
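/*
 * Illustrative worked example (not part of the upstream header), with
 * hypothetical numbers: if the timeline's current hwsp_offset is 0x0001f040
 * then page_mask_bits() yields the page base 0x0001f000, and if this
 * request's rq->hwsp_seqno still points 0x80 bytes into that page (an older
 * cacheline than the timeline's current one), offset_in_page() yields 0x80,
 * so the active seqno address reported is 0x0001f000 + 0x80 = 0x0001f080.
 */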
639
640bool
641i915_request_active_engine(struct i915_request *rq,
642			   struct intel_engine_cs **active);
643
644#endif /* I915_REQUEST_H */
v4.17
  1/*
  2 * Copyright © 2008-2018 Intel Corporation
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 21 * IN THE SOFTWARE.
 22 *
 23 */
 24
 25#ifndef I915_REQUEST_H
 26#define I915_REQUEST_H
 27
 28#include <linux/dma-fence.h>
 29
 30#include "i915_gem.h"
 31#include "i915_sw_fence.h"
 32
 33#include <uapi/drm/i915_drm.h>
 34
 35struct drm_file;
 36struct drm_i915_gem_object;
 37struct i915_request;
 38
 39struct intel_wait {
 40	struct rb_node node;
 41	struct task_struct *tsk;
 42	struct i915_request *request;
 43	u32 seqno;
 44};
 45
 46struct intel_signal_node {
 47	struct intel_wait wait;
 48	struct list_head link;
 49};
 50
 51struct i915_dependency {
 52	struct i915_priotree *signaler;
 53	struct list_head signal_link;
 54	struct list_head wait_link;
 55	struct list_head dfs_link;
 56	unsigned long flags;
 57#define I915_DEPENDENCY_ALLOC BIT(0)
 58};
 59
 60/*
 61 * "People assume that time is a strict progression of cause to effect, but
 62 * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
 63 * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
 64 *
 65 * Requests exist in a complex web of interdependencies. Each request
 66 * has to wait for some other request to complete before it is ready to be run
  67 * (e.g. we have to wait until the pixels have been rendered into a texture
 68 * before we can copy from it). We track the readiness of a request in terms
 69 * of fences, but we also need to keep the dependency tree for the lifetime
 70 * of the request (beyond the life of an individual fence). We use the tree
 71 * at various points to reorder the requests whilst keeping the requests
 72 * in order with respect to their various dependencies.
 73 */
 74struct i915_priotree {
 75	struct list_head signalers_list; /* those before us, we depend upon */
 76	struct list_head waiters_list; /* those after us, they depend upon us */
 77	struct list_head link;
 78	int priority;
 79};
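/*
 * Illustrative sketch (not part of the upstream header): walking the
 * dependency web described above. This assumes, as the scheduler code does,
 * that signalers_list links struct i915_dependency entries through their
 * signal_link member; the helper name is hypothetical.
 */
#if 0
static bool example_all_signalers_done(struct i915_priotree *pt)
{
	struct i915_dependency *dep;

	list_for_each_entry(dep, &pt->signalers_list, signal_link)
		if (!i915_priotree_signaled(dep->signaler))
			return false;

	return true;
}
#endif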
 80
 81enum {
 82	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
 83	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
 84	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
 85
 86	I915_PRIORITY_INVALID = INT_MIN
 87};
 88
 89struct i915_capture_list {
 90	struct i915_capture_list *next;
 91	struct i915_vma *vma;
 92};
 93
 94/**
 95 * Request queue structure.
 96 *
 97 * The request queue allows us to note sequence numbers that have been emitted
 98 * and may be associated with active buffers to be retired.
 99 *
100 * By keeping this list, we can avoid having to do questionable sequence
101 * number comparisons on buffer last_read|write_seqno. It also allows an
102 * emission time to be associated with the request for tracking how far ahead
103 * of the GPU the submission is.
104 *
105 * When modifying this structure be very aware that we perform a lockless
106 * RCU lookup of it that may race against reallocation of the struct
107 * from the slab freelist. We intentionally do not zero the structure on
108 * allocation so that the lookup can use the dangling pointers (and is
 109 * cognisant that those pointers may be wrong). Instead, everything that
110 * needs to be initialised must be done so explicitly.
111 *
112 * The requests are reference counted.
113 */
114struct i915_request {
115	struct dma_fence fence;
116	spinlock_t lock;
117
 118	/** On which ring this request was generated */
119	struct drm_i915_private *i915;
120
121	/**
122	 * Context and ring buffer related to this request
123	 * Contexts are refcounted, so when this request is associated with a
124	 * context, we must increment the context's refcount, to guarantee that
125	 * it persists while any request is linked to it. Requests themselves
126	 * are also refcounted, so the request will only be freed when the last
127	 * reference to it is dismissed, and the code in
128	 * i915_request_free() will then decrement the refcount on the
129	 * context.
130	 */
131	struct i915_gem_context *ctx;
132	struct intel_engine_cs *engine;
133	struct intel_ring *ring;
134	struct intel_timeline *timeline;
135	struct intel_signal_node signaling;
136
137	/*
138	 * Fences for the various phases in the request's lifetime.
139	 *
140	 * The submit fence is used to await upon all of the request's
141	 * dependencies. When it is signaled, the request is ready to run.
142	 * It is used by the driver to then queue the request for execution.
143	 */
144	struct i915_sw_fence submit;
145	wait_queue_entry_t submitq;
146	wait_queue_head_t execute;
147
148	/*
149	 * A list of everyone we wait upon, and everyone who waits upon us.
150	 * Even though we will not be submitted to the hardware before the
151	 * submit fence is signaled (it waits for all external events as well
152	 * as our own requests), the scheduler still needs to know the
153	 * dependency tree for the lifetime of the request (from execbuf
154	 * to retirement), i.e. bidirectional dependency information for the
155	 * request not tied to individual fences.
156	 */
157	struct i915_priotree priotree;
158	struct i915_dependency dep;
159
160	/**
161	 * GEM sequence number associated with this request on the
162	 * global execution timeline. It is zero when the request is not
163	 * on the HW queue (i.e. not on the engine timeline list).
164	 * Its value is guarded by the timeline spinlock.
165	 */
166	u32 global_seqno;
167
168	/** Position in the ring of the start of the request */
169	u32 head;
170
171	/**
172	 * Position in the ring of the start of the postfix.
173	 * This is required to calculate the maximum available ring space
174	 * without overwriting the postfix.
175	 */
176	u32 postfix;
177
178	/** Position in the ring of the end of the whole request */
179	u32 tail;
180
181	/** Position in the ring of the end of any workarounds after the tail */
182	u32 wa_tail;
183
 184	/** Preallocate space in the ring for emitting the request */
185	u32 reserved_space;
186
187	/** Batch buffer related to this request if any (used for
188	 * error state dump only).
189	 */
190	struct i915_vma *batch;
191	/**
192	 * Additional buffers requested by userspace to be captured upon
193	 * a GPU hang. The vma/obj on this list are protected by their
194	 * active reference - all objects on this list must also be
195	 * on the active_list (of their final request).
196	 */
197	struct i915_capture_list *capture_list;
198	struct list_head active_list;
199
200	/** Time at which this request was emitted, in jiffies. */
201	unsigned long emitted_jiffies;
202
203	bool waitboost;
204
205	/** engine->request_list entry for this request */
206	struct list_head link;
207
208	/** ring->request_list entry for this request */
209	struct list_head ring_link;
210
211	struct drm_i915_file_private *file_priv;
212	/** file_priv list entry for this request */
213	struct list_head client_link;
214};
215
216#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
217
218extern const struct dma_fence_ops i915_fence_ops;
219
220static inline bool dma_fence_is_i915(const struct dma_fence *fence)
221{
222	return fence->ops == &i915_fence_ops;
223}
224
225struct i915_request * __must_check
226i915_request_alloc(struct intel_engine_cs *engine,
227		   struct i915_gem_context *ctx);
228void i915_request_retire_upto(struct i915_request *rq);
229
230static inline struct i915_request *
231to_request(struct dma_fence *fence)
232{
233	/* We assume that NULL fence/request are interoperable */
234	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
235	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
236	return container_of(fence, struct i915_request, fence);
237}
238
239static inline struct i915_request *
240i915_request_get(struct i915_request *rq)
241{
242	return to_request(dma_fence_get(&rq->fence));
243}
244
245static inline struct i915_request *
246i915_request_get_rcu(struct i915_request *rq)
247{
248	return to_request(dma_fence_get_rcu(&rq->fence));
249}
250
251static inline void
252i915_request_put(struct i915_request *rq)
253{
254	dma_fence_put(&rq->fence);
255}
256
257/**
258 * i915_request_global_seqno - report the current global seqno
259 * @request - the request
260 *
261 * A request is assigned a global seqno only when it is on the hardware
262 * execution queue. The global seqno can be used to maintain a list of
263 * requests on the same engine in retirement order, for example for
264 * constructing a priority queue for waiting. Prior to its execution, or
265 * if it is subsequently removed in the event of preemption, its global
266 * seqno is zero. As both insertion and removal from the execution queue
267 * may operate in IRQ context, it is not guarded by the usual struct_mutex
268 * BKL. Instead those relying on the global seqno must be prepared for its
269 * value to change between reads. Only when the request is complete can
270 * the global seqno be stable (due to the memory barriers on submitting
271 * the commands to the hardware to write the breadcrumb, if the HWS shows
272 * that it has passed the global seqno and the global seqno is unchanged
273 * after the read, it is indeed complete).
274 */
275static u32
276i915_request_global_seqno(const struct i915_request *request)
277{
278	return READ_ONCE(request->global_seqno);
279}
280
281int i915_request_await_object(struct i915_request *to,
282			      struct drm_i915_gem_object *obj,
283			      bool write);
284int i915_request_await_dma_fence(struct i915_request *rq,
285				 struct dma_fence *fence);
286
287void __i915_request_add(struct i915_request *rq, bool flush_caches);
288#define i915_request_add(rq) \
289	__i915_request_add(rq, false)
290
291void __i915_request_submit(struct i915_request *request);
292void i915_request_submit(struct i915_request *request);
293
294void __i915_request_unsubmit(struct i915_request *request);
295void i915_request_unsubmit(struct i915_request *request);
296
297long i915_request_wait(struct i915_request *rq,
298		       unsigned int flags,
299		       long timeout)
300	__attribute__((nonnull(1)));
301#define I915_WAIT_INTERRUPTIBLE	BIT(0)
302#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
303#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
304
305static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
306
307/**
308 * Returns true if seq1 is later than seq2.
309 */
310static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
311{
312	return (s32)(seq1 - seq2) >= 0;
313}
314
315static inline bool
316__i915_request_completed(const struct i915_request *rq, u32 seqno)
317{
318	GEM_BUG_ON(!seqno);
319	return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
320		seqno == i915_request_global_seqno(rq);
321}
322
323static inline bool i915_request_completed(const struct i915_request *rq)
324{
325	u32 seqno;
326
327	seqno = i915_request_global_seqno(rq);
328	if (!seqno)
329		return false;
330
331	return __i915_request_completed(rq, seqno);
332}
333
334static inline bool i915_request_started(const struct i915_request *rq)
335{
336	u32 seqno;
337
338	seqno = i915_request_global_seqno(rq);
339	if (!seqno)
340		return false;
341
342	return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
343				 seqno - 1);
344}
345
346static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
347{
348	const struct i915_request *rq =
349		container_of(pt, const struct i915_request, priotree);
350
351	return i915_request_completed(rq);
352}
353
354void i915_retire_requests(struct drm_i915_private *i915);
355
356/*
 357 * We treat requests as fences. These are not to be confused with our
 358 * "fence registers", but are pipeline synchronisation objects a la GL_ARB_sync.
359 * We use the fences to synchronize access from the CPU with activity on the
360 * GPU, for example, we should not rewrite an object's PTE whilst the GPU
361 * is reading them. We also track fences at a higher level to provide
362 * implicit synchronisation around GEM objects, e.g. set-domain will wait
363 * for outstanding GPU rendering before marking the object ready for CPU
364 * access, or a pageflip will wait until the GPU is complete before showing
365 * the frame on the scanout.
366 *
367 * In order to use a fence, the object must track the fence it needs to
368 * serialise with. For example, GEM objects want to track both read and
369 * write access so that we can perform concurrent read operations between
370 * the CPU and GPU engines, as well as waiting for all rendering to
371 * complete, or waiting for the last GPU user of a "fence register". The
372 * object then embeds a #i915_gem_active to track the most recent (in
373 * retirement order) request relevant for the desired mode of access.
374 * The #i915_gem_active is updated with i915_gem_active_set() to track the
375 * most recent fence request, typically this is done as part of
376 * i915_vma_move_to_active().
377 *
378 * When the #i915_gem_active completes (is retired), it will
379 * signal its completion to the owner through a callback as well as mark
380 * itself as idle (i915_gem_active.request == NULL). The owner
381 * can then perform any action, such as delayed freeing of an active
382 * resource including itself.
383 */
384struct i915_gem_active;
385
386typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
387				   struct i915_request *);
388
389struct i915_gem_active {
390	struct i915_request __rcu *request;
391	struct list_head link;
392	i915_gem_retire_fn retire;
393};
394
395void i915_gem_retire_noop(struct i915_gem_active *,
396			  struct i915_request *request);
397
398/**
399 * init_request_active - prepares the activity tracker for use
400 * @active - the active tracker
 401 * @func - a callback for when the tracker is retired (becomes idle),
402 *         can be NULL
403 *
404 * init_request_active() prepares the embedded @active struct for use as
405 * an activity tracker, that is for tracking the last known active request
406 * associated with it. When the last request becomes idle, when it is retired
407 * after completion, the optional callback @func is invoked.
408 */
409static inline void
410init_request_active(struct i915_gem_active *active,
411		    i915_gem_retire_fn retire)
412{
413	INIT_LIST_HEAD(&active->link);
414	active->retire = retire ?: i915_gem_retire_noop;
415}
416
417/**
418 * i915_gem_active_set - updates the tracker to watch the current request
419 * @active - the active tracker
420 * @request - the request to watch
421 *
422 * i915_gem_active_set() watches the given @request for completion. Whilst
423 * that @request is busy, the @active reports busy. When that @request is
424 * retired, the @active tracker is updated to report idle.
425 */
426static inline void
427i915_gem_active_set(struct i915_gem_active *active,
428		    struct i915_request *request)
429{
430	list_move(&active->link, &request->active_list);
431	rcu_assign_pointer(active->request, request);
432}
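/*
 * Illustrative sketch (not part of the upstream header): a typical consumer
 * embeds an i915_gem_active in its own object, prepares it once with
 * init_request_active(), and then points it at the latest request from its
 * move-to-active path while holding struct_mutex. The type, field and
 * function names below are hypothetical.
 */
#if 0
struct example_tracker {
	struct i915_gem_active last_request;
};

static void example_tracker_init(struct example_tracker *t)
{
	/* NULL retire callback: falls back to i915_gem_retire_noop */
	init_request_active(&t->last_request, NULL);
}

static void example_tracker_mark_active(struct example_tracker *t,
					struct i915_request *rq)
{
	/* caller holds struct_mutex; rq is the request just emitted */
	i915_gem_active_set(&t->last_request, rq);
}
#endif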
433
434/**
435 * i915_gem_active_set_retire_fn - updates the retirement callback
436 * @active - the active tracker
437 * @fn - the routine called when the request is retired
438 * @mutex - struct_mutex used to guard retirements
439 *
440 * i915_gem_active_set_retire_fn() updates the function pointer that
441 * is called when the final request associated with the @active tracker
442 * is retired.
443 */
444static inline void
445i915_gem_active_set_retire_fn(struct i915_gem_active *active,
446			      i915_gem_retire_fn fn,
447			      struct mutex *mutex)
448{
449	lockdep_assert_held(mutex);
450	active->retire = fn ?: i915_gem_retire_noop;
451}
452
453static inline struct i915_request *
454__i915_gem_active_peek(const struct i915_gem_active *active)
455{
456	/*
457	 * Inside the error capture (running with the driver in an unknown
458	 * state), we want to bend the rules slightly (a lot).
459	 *
460	 * Work is in progress to make it safer, in the meantime this keeps
461	 * the known issue from spamming the logs.
462	 */
463	return rcu_dereference_protected(active->request, 1);
464}
465
466/**
467 * i915_gem_active_raw - return the active request
468 * @active - the active tracker
469 *
470 * i915_gem_active_raw() returns the current request being tracked, or NULL.
471 * It does not obtain a reference on the request for the caller, so the caller
472 * must hold struct_mutex.
473 */
474static inline struct i915_request *
475i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
476{
477	return rcu_dereference_protected(active->request,
478					 lockdep_is_held(mutex));
479}
480
481/**
482 * i915_gem_active_peek - report the active request being monitored
483 * @active - the active tracker
484 *
485 * i915_gem_active_peek() returns the current request being tracked if
486 * still active, or NULL. It does not obtain a reference on the request
487 * for the caller, so the caller must hold struct_mutex.
488 */
489static inline struct i915_request *
490i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
491{
492	struct i915_request *request;
493
494	request = i915_gem_active_raw(active, mutex);
495	if (!request || i915_request_completed(request))
496		return NULL;
497
498	return request;
499}
500
501/**
502 * i915_gem_active_get - return a reference to the active request
503 * @active - the active tracker
504 *
505 * i915_gem_active_get() returns a reference to the active request, or NULL
506 * if the active tracker is idle. The caller must hold struct_mutex.
507 */
508static inline struct i915_request *
509i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
510{
511	return i915_request_get(i915_gem_active_peek(active, mutex));
512}
513
514/**
515 * __i915_gem_active_get_rcu - return a reference to the active request
516 * @active - the active tracker
517 *
518 * __i915_gem_active_get() returns a reference to the active request, or NULL
519 * if the active tracker is idle. The caller must hold the RCU read lock, but
520 * the returned pointer is safe to use outside of RCU.
521 */
522static inline struct i915_request *
523__i915_gem_active_get_rcu(const struct i915_gem_active *active)
524{
525	/*
526	 * Performing a lockless retrieval of the active request is super
527	 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
528	 * slab of request objects will not be freed whilst we hold the
529	 * RCU read lock. It does not guarantee that the request itself
530	 * will not be freed and then *reused*. Viz,
531	 *
532	 * Thread A			Thread B
533	 *
534	 * rq = active.request
535	 *				retire(rq) -> free(rq);
536	 *				(rq is now first on the slab freelist)
537	 *				active.request = NULL
538	 *
539	 *				rq = new submission on a new object
540	 * ref(rq)
541	 *
542	 * To prevent the request from being reused whilst the caller
543	 * uses it, we take a reference like normal. Whilst acquiring
544	 * the reference we check that it is not in a destroyed state
545	 * (refcnt == 0). That prevents the request being reallocated
546	 * whilst the caller holds on to it. To check that the request
547	 * was not reallocated as we acquired the reference we have to
548	 * check that our request remains the active request across
549	 * the lookup, in the same manner as a seqlock. The visibility
550	 * of the pointer versus the reference counting is controlled
551	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
552	 *
553	 * In the middle of all that, we inspect whether the request is
554	 * complete. Retiring is lazy so the request may be completed long
555	 * before the active tracker is updated. Querying whether the
556	 * request is complete is far cheaper (as it involves no locked
557	 * instructions setting cachelines to exclusive) than acquiring
558	 * the reference, so we do it first. The RCU read lock ensures the
559	 * pointer dereference is valid, but does not ensure that the
560	 * seqno nor HWS is the right one! However, if the request was
561	 * reallocated, that means the active tracker's request was complete.
562	 * If the new request is also complete, then both are and we can
563	 * just report the active tracker is idle. If the new request is
564	 * incomplete, then we acquire a reference on it and check that
565	 * it remained the active request.
566	 *
567	 * It is then imperative that we do not zero the request on
568	 * reallocation, so that we can chase the dangling pointers!
569	 * See i915_request_alloc().
570	 */
571	do {
572		struct i915_request *request;
573
574		request = rcu_dereference(active->request);
575		if (!request || i915_request_completed(request))
576			return NULL;
577
578		/*
579		 * An especially silly compiler could decide to recompute the
580		 * result of i915_request_completed, more specifically
581		 * re-emit the load for request->fence.seqno. A race would catch
582		 * a later seqno value, which could flip the result from true to
583		 * false. Which means part of the instructions below might not
584		 * be executed, while later on instructions are executed. Due to
585		 * barriers within the refcounting the inconsistency can't reach
586		 * past the call to i915_request_get_rcu, but not executing
587		 * that while still executing i915_request_put() creates
588		 * havoc enough.  Prevent this with a compiler barrier.
589		 */
590		barrier();
591
592		request = i915_request_get_rcu(request);
593
594		/*
595		 * What stops the following rcu_access_pointer() from occurring
596		 * before the above i915_request_get_rcu()? If we were
597		 * to read the value before pausing to get the reference to
598		 * the request, we may not notice a change in the active
599		 * tracker.
600		 *
601		 * The rcu_access_pointer() is a mere compiler barrier, which
602		 * means both the CPU and compiler are free to perform the
603		 * memory read without constraint. The compiler only has to
604		 * ensure that any operations after the rcu_access_pointer()
605		 * occur afterwards in program order. This means the read may
606		 * be performed earlier by an out-of-order CPU, or adventurous
607		 * compiler.
608		 *
609		 * The atomic operation at the heart of
610		 * i915_request_get_rcu(), see dma_fence_get_rcu(), is
611		 * atomic_inc_not_zero() which is only a full memory barrier
612		 * when successful. That is, if i915_request_get_rcu()
613		 * returns the request (and so with the reference counted
614		 * incremented) then the following read for rcu_access_pointer()
615		 * must occur after the atomic operation and so confirm
616		 * that this request is the one currently being tracked.
617		 *
618		 * The corresponding write barrier is part of
619		 * rcu_assign_pointer().
620		 */
621		if (!request || request == rcu_access_pointer(active->request))
622			return rcu_pointer_handoff(request);
623
624		i915_request_put(request);
625	} while (1);
626}
627
628/**
629 * i915_gem_active_get_unlocked - return a reference to the active request
630 * @active - the active tracker
631 *
632 * i915_gem_active_get_unlocked() returns a reference to the active request,
633 * or NULL if the active tracker is idle. The reference is obtained under RCU,
634 * so no locking is required by the caller.
635 *
636 * The reference should be freed with i915_request_put().
637 */
638static inline struct i915_request *
639i915_gem_active_get_unlocked(const struct i915_gem_active *active)
640{
641	struct i915_request *request;
642
643	rcu_read_lock();
644	request = __i915_gem_active_get_rcu(active);
645	rcu_read_unlock();
646
647	return request;
648}
649
650/**
651 * i915_gem_active_isset - report whether the active tracker is assigned
652 * @active - the active tracker
653 *
654 * i915_gem_active_isset() returns true if the active tracker is currently
655 * assigned to a request. Due to the lazy retiring, that request may be idle
656 * and this may report stale information.
657 */
658static inline bool
659i915_gem_active_isset(const struct i915_gem_active *active)
660{
661	return rcu_access_pointer(active->request);
662}
663
664/**
665 * i915_gem_active_wait - waits until the request is completed
666 * @active - the active request on which to wait
667 * @flags - how to wait
668 * @timeout - how long to wait at most
669 * @rps - userspace client to charge for a waitboost
670 *
671 * i915_gem_active_wait() waits until the request is completed before
672 * returning, without requiring any locks to be held. Note that it does not
673 * retire any requests before returning.
674 *
675 * This function relies on RCU in order to acquire the reference to the active
676 * request without holding any locks. See __i915_gem_active_get_rcu() for the
677 * glory details on how that is managed. Once the reference is acquired, we
678 * can then wait upon the request, and afterwards release our reference,
679 * free of any locking.
680 *
681 * This function wraps i915_request_wait(), see it for the full details on
682 * the arguments.
683 *
684 * Returns 0 if successful, or a negative error code.
685 */
686static inline int
687i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
688{
689	struct i915_request *request;
690	long ret = 0;
691
692	request = i915_gem_active_get_unlocked(active);
693	if (request) {
694		ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT);
695		i915_request_put(request);
696	}
697
698	return ret < 0 ? ret : 0;
699}
700
701/**
702 * i915_gem_active_retire - waits until the request is retired
703 * @active - the active request on which to wait
704 *
705 * i915_gem_active_retire() waits until the request is completed,
706 * and then ensures that at least the retirement handler for this
707 * @active tracker is called before returning. If the @active
708 * tracker is idle, the function returns immediately.
709 */
710static inline int __must_check
711i915_gem_active_retire(struct i915_gem_active *active,
712		       struct mutex *mutex)
713{
714	struct i915_request *request;
715	long ret;
716
717	request = i915_gem_active_raw(active, mutex);
718	if (!request)
719		return 0;
720
721	ret = i915_request_wait(request,
722				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
723				MAX_SCHEDULE_TIMEOUT);
724	if (ret < 0)
725		return ret;
726
727	list_del_init(&active->link);
728	RCU_INIT_POINTER(active->request, NULL);
729
730	active->retire(active, request);
731
732	return 0;
733}
734
735#define for_each_active(mask, idx) \
736	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
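/*
 * Illustrative note (not part of the upstream header): for_each_active()
 * consumes the mask it is given, visiting the index of each set bit in turn.
 * A hedged usage sketch with hypothetical names; pass a local copy because
 * the mask is destroyed:
 *
 *	unsigned int idx;
 *	unsigned int mask = engine_mask;
 *
 *	for_each_active(mask, idx)
 *		process_engine(idx);
 */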
737
738#endif /* I915_REQUEST_H */