/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/hrtimer.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"
#include "i915_vma_resource.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct drm_printer;
struct i915_deps;
struct i915_request;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct i915_capture_list {
        struct i915_vma_resource *vma_res;
        struct i915_capture_list *next;
};

void i915_request_free_capture_list(struct i915_capture_list *capture);
#else
#define i915_request_free_capture_list(_a) do {} while (0)
#endif
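
/*
 * Example (illustrative sketch of the pattern, not a driver API): entries
 * are prepended to a request's singly-linked capture list while the
 * request is still under construction; 'rq' and 'vma_res' are assumed
 * to be valid here:
 *
 *      struct i915_capture_list *capture;
 *
 *      capture = kmalloc(sizeof(*capture), GFP_KERNEL);
 *      if (capture) {
 *              capture->vma_res = vma_res;
 *              capture->next = rq->capture_list;
 *              rq->capture_list = capture;
 *      }
 */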

#define RQ_TRACE(rq, fmt, ...) do {                                     \
        const struct i915_request *rq__ = (rq);                         \
        ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,  \
                     rq__->fence.context, rq__->fence.seqno,            \
                     hwsp_seqno(rq__), ##__VA_ARGS__);                  \
} while (0)
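
/*
 * Example (illustrative): RQ_TRACE() prefixes a debug message with the
 * request's fence id (context:seqno) and the breadcrumb most recently
 * written to the HWSP, so a call site only supplies the event-specific
 * suffix ('prio' here is a hypothetical local variable):
 *
 *      RQ_TRACE(rq, "submitted, prio %d\n", prio);
 */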

enum {
        /*
         * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
         *
         * Set by __i915_request_submit() on handing over to HW, and cleared
         * by __i915_request_unsubmit() if we preempt this request.
         *
         * Finally cleared for consistency on retiring the request, when
         * we know the HW is no longer running this request.
         *
         * See i915_request_is_active()
         */
        I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

        /*
         * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
         *
         * Using the scheduler, when a request is ready for execution it is put
         * into the priority queue, and removed from that queue when transferred
         * to the HW runlists. We want to track its membership within the
         * priority queue so that we can easily check before rescheduling.
         *
         * See i915_request_in_priority_queue()
         */
        I915_FENCE_FLAG_PQUEUE,

        /*
         * I915_FENCE_FLAG_HOLD - this request is currently on hold
         *
         * This request has been suspended, pending an ongoing investigation.
         */
        I915_FENCE_FLAG_HOLD,

        /*
         * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
         * breadcrumb that marks the end of semaphore waits and start of the
         * user payload.
         */
        I915_FENCE_FLAG_INITIAL_BREADCRUMB,

        /*
         * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
         *
         * Internal bookkeeping used by the breadcrumb code to track when
         * a request is on the various signal_list.
         */
        I915_FENCE_FLAG_SIGNAL,

        /*
         * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
         *
         * The execution of some requests should not be interrupted. This is
         * a sensitive operation as it makes the request super important,
         * blocking other higher priority work. Abuse of this flag will
         * lead to quality of service issues.
         */
        I915_FENCE_FLAG_NOPREEMPT,

        /*
         * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
         *
         * A high priority sentinel request may be submitted to clear the
         * submission queue. As it will be the only request in-flight, upon
         * execution all other active requests will have been preempted and
         * unsubmitted. This preemptive pulse is used to re-evaluate the
         * in-flight requests, particularly in cases where an active context
         * is banned and those active requests need to be cancelled.
         */
        I915_FENCE_FLAG_SENTINEL,

        /*
         * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
         *
         * Some requests are more important than others! In particular, a
         * request that the user is waiting on is typically required for
         * interactive latency, which we want to minimise by upclocking
         * the GPU. Here we track such boost requests on a per-request basis.
         */
        I915_FENCE_FLAG_BOOST,

        /*
         * I915_FENCE_FLAG_SUBMIT_PARALLEL - request with a context in a
         * parent-child relationship (parallel submission, multi-lrc) should
         * trigger a submission to the GuC rather than just moving the context
         * tail.
         */
        I915_FENCE_FLAG_SUBMIT_PARALLEL,

        /*
         * I915_FENCE_FLAG_SKIP_PARALLEL - request with a context in a
         * parent-child relationship (parallel submission, multi-lrc) that
         * hit an error while generating requests in the execbuf IOCTL.
         * Indicates this request should be skipped as another request in
         * the submission / relationship encountered an error.
         */
        I915_FENCE_FLAG_SKIP_PARALLEL,

        /*
         * I915_FENCE_FLAG_COMPOSITE - Indicates the fence is part of a
         * composite fence (dma_fence_array) generated by i915 for parallel
         * submission.
         */
        I915_FENCE_FLAG_COMPOSITE,
};

/*
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
        struct dma_fence fence;
        spinlock_t lock;

        struct drm_i915_private *i915;

        /*
         * Context and ring buffer related to this request
         * Contexts are refcounted, so when this request is associated with a
         * context, we must increment the context's refcount, to guarantee that
         * it persists while any request is linked to it. Requests themselves
         * are also refcounted, so the request will only be freed when the last
         * reference to it is dismissed, and the code in
         * i915_request_free() will then decrement the refcount on the
         * context.
         */
        struct intel_engine_cs *engine;
        struct intel_context *context;
        struct intel_ring *ring;
        struct intel_timeline __rcu *timeline;

        struct list_head signal_link;
        struct llist_node signal_node;

        /*
         * The RCU epoch of when this request was allocated. Used to
         * judiciously apply backpressure on future allocations to ensure
         * that under mempressure there are sufficient RCU ticks for us to
         * reclaim our RCU protected slabs.
         */
        unsigned long rcustate;

        /*
         * We pin the timeline->mutex while constructing the request to
         * ensure that no caller accidentally drops it during construction.
         * The timeline->mutex must be held to ensure that only this caller
         * can use the ring and manipulate the associated timeline during
         * construction.
         */
        struct pin_cookie cookie;

        /*
         * Fences for the various phases in the request's lifetime.
         *
         * The submit fence is used to await upon all of the request's
         * dependencies. When it is signaled, the request is ready to run.
         * It is used by the driver to then queue the request for execution.
         */
        struct i915_sw_fence submit;
        union {
                wait_queue_entry_t submitq;
                struct i915_sw_dma_fence_cb dmaq;
                struct i915_request_duration_cb {
                        struct dma_fence_cb cb;
                        ktime_t emitted;
                } duration;
        };
        struct llist_head execute_cb;
        struct i915_sw_fence semaphore;
        /*
         * complete submit fence from an IRQ if needed for locking hierarchy
         * reasons.
         */
        struct irq_work submit_work;

        /*
         * A list of everyone we wait upon, and everyone who waits upon us.
         * Even though we will not be submitted to the hardware before the
         * submit fence is signaled (it waits for all external events as well
         * as our own requests), the scheduler still needs to know the
         * dependency tree for the lifetime of the request (from execbuf
         * to retirement), i.e. bidirectional dependency information for the
         * request not tied to individual fences.
         */
        struct i915_sched_node sched;
        struct i915_dependency dep;
        intel_engine_mask_t execution_mask;

        /*
         * A convenience pointer to the current breadcrumb value stored in
         * the HW status page (or our timeline's local equivalent). The full
         * path would be rq->hw_context->ring->timeline->hwsp_seqno.
         */
        const u32 *hwsp_seqno;

        /* Position in the ring of the start of the request */
        u32 head;

        /* Position in the ring of the start of the user packets */
        u32 infix;

        /*
         * Position in the ring of the start of the postfix.
         * This is required to calculate the maximum available ring space
         * without overwriting the postfix.
         */
        u32 postfix;

        /* Position in the ring of the end of the whole request */
        u32 tail;

        /* Position in the ring of the end of any workarounds after the tail */
        u32 wa_tail;

        /* Preallocated space in the ring for emitting the request */
        u32 reserved_space;

        /* Batch buffer pointer for selftest internal use. */
        I915_SELFTEST_DECLARE(struct i915_vma *batch);

        struct i915_vma_resource *batch_res;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
        /*
         * Additional buffers requested by userspace to be captured upon
         * a GPU hang. The vma/obj on this list are protected by their
         * active reference - all objects on this list must also be
         * on the active_list (of their final request).
         */
        struct i915_capture_list *capture_list;
#endif

        /* Time at which this request was emitted, in jiffies. */
        unsigned long emitted_jiffies;

        /* timeline->request entry for this request */
        struct list_head link;

        /* Watchdog support fields. */
        struct i915_request_watchdog {
                struct llist_node link;
                struct hrtimer timer;
        } watchdog;

        /*
         * Requests may need to be stalled when using GuC submission, waiting
         * for certain GuC operations to complete. If that is the case, stalled
         * requests are added to a per-context list of stalled requests. The
         * below list_head is the link in that list. Protected by
         * ce->guc_state.lock.
         */
        struct list_head guc_fence_link;

        /*
         * Priority level while the request is in flight. Differs
         * from i915 scheduler priority. See comment above
         * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by
         * ce->guc_active.lock. Two special values (GUC_PRIO_INIT and
         * GUC_PRIO_FINI) outside the GuC priority range are used to indicate
         * if the priority has not been initialized yet or if no more updates
         * are possible because the request has completed.
         */
#define GUC_PRIO_INIT   0xff
#define GUC_PRIO_FINI   0xfe
        u8 guc_prio;

        /*
         * wait queue entry used to wait on the HuC load to complete
         */
        wait_queue_entry_t hucq;

        I915_SELFTEST_DECLARE(struct {
                struct list_head link;
                unsigned long delay;
        } mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
        return fence->ops == &i915_fence_ops;
}

struct kmem_cache *i915_request_slab_cache(void);

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
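
/*
 * Example (illustrative sketch): the usual construction flow on a pinned
 * context 'ce'. Error handling and the actual command emission are elided:
 *
 *      struct i915_request *rq;
 *
 *      rq = i915_request_create(ce);
 *      if (IS_ERR(rq))
 *              return PTR_ERR(rq);
 *
 *      ... emit commands, record dependencies via i915_request_await_*() ...
 *
 *      i915_request_add(rq);
 */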

void __i915_request_skip(struct i915_request *rq);
bool i915_request_set_error_once(struct i915_request *rq, int error);
struct i915_request *i915_request_mark_eio(struct i915_request *rq);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
                          const struct i915_sched_attr *attr);
void __i915_request_queue_bh(struct i915_request *rq);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
        /* We assume that NULL fence/request are interoperable */
        BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
        GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
        return container_of(fence, struct i915_request, fence);
}
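
/*
 * Example (illustrative): downcasting a generic fence. Because the fence is
 * the first member of struct i915_request, to_request(NULL) == NULL, and
 * the conversion is only legal once dma_fence_is_i915() has been checked:
 *
 *      if (dma_fence_is_i915(fence))
 *              rq = to_request(fence);
 */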

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
        return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
        return to_request(dma_fence_get_rcu(&rq->fence));
}
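
/*
 * Example (illustrative): the lockless lookup alluded to in the struct
 * documentation above. Within an rcu_read_lock() section a request pointer
 * may be revalidated with i915_request_get_rcu(), which returns NULL if
 * the refcount has already dropped to zero and the slab may be recycled:
 *
 *      rcu_read_lock();
 *      rq = i915_request_get_rcu(rq);
 *      rcu_read_unlock();
 *      if (rq) {
 *              ... use rq ...
 *              i915_request_put(rq);
 *      }
 */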

static inline void
i915_request_put(struct i915_request *rq)
{
        dma_fence_put(&rq->fence);
}

int i915_request_await_object(struct i915_request *to,
                              struct drm_i915_gem_object *obj,
                              bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
                                 struct dma_fence *fence);
int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps);
int i915_request_await_execution(struct i915_request *rq,
                                 struct dma_fence *fence);
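
/*
 * Example (illustrative): making 'rq' wait for an external dma_fence before
 * it may execute. The await must be recorded while the request is still
 * under construction, i.e. before i915_request_add():
 *
 *      err = i915_request_await_dma_fence(rq, fence);
 *      if (err)
 *              return err;
 */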

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

void i915_request_cancel(struct i915_request *rq, int error);

long i915_request_wait_timeout(struct i915_request *rq,
                               unsigned int flags,
                               long timeout)
        __attribute__((nonnull(1)));

long i915_request_wait(struct i915_request *rq,
                       unsigned int flags,
                       long timeout)
        __attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_PRIORITY      BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL           BIT(2) /* used by i915_gem_object_wait() */
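
/*
 * Example (illustrative): a bounded, interruptible wait for completion.
 * On success i915_request_wait() returns the remaining timeout in jiffies,
 * otherwise a negative error code (e.g. -ETIME on timeout):
 *
 *      long ret;
 *
 *      ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ);
 *      if (ret < 0)
 *              return ret;
 */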

void i915_request_show(struct drm_printer *m,
                       const struct i915_request *rq,
                       const char *prefix,
                       int indent);

static inline bool i915_request_signaled(const struct i915_request *rq)
{
        /* The request may live longer than its HWSP, so check flags first! */
        return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
        return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
        return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

static inline bool
i915_request_has_initial_breadcrumb(const struct i915_request *rq)
{
        return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
}

/*
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
        return (s32)(seq1 - seq2) >= 0;
}
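
/*
 * Worked example: the signed subtraction makes the comparison robust to
 * u32 wraparound. With seq1 == 1 and seq2 == 0xffffffff, the difference
 * 1 - 0xffffffff == 2 as u32, which is a positive s32, so seqno 1 is
 * correctly treated as later than 0xffffffff:
 *
 *      i915_seqno_passed(1, 0xffffffff);       // true
 *      i915_seqno_passed(0xffffffff, 1);       // false
 *      i915_seqno_passed(5, 5);                // true (not earlier)
 */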

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
        const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

        return READ_ONCE(*hwsp);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page has that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
        u32 seqno;

        rcu_read_lock(); /* the HWSP may be freed at runtime */
        seqno = __hwsp_seqno(rq);
        rcu_read_unlock();

        return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
        return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
        bool result;

        if (i915_request_signaled(rq))
                return true;

        result = true;
        rcu_read_lock(); /* the HWSP may be freed at runtime */
        if (likely(!i915_request_signaled(rq)))
                /* Remember: started but may have since been preempted! */
                result = __i915_request_has_started(rq);
        rcu_read_unlock();

        return result;
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and it is not
 * busywaiting). Note that it may no longer be running by the time the
 * function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
        bool result;

        if (!i915_request_is_active(rq))
                return false;

        rcu_read_lock();
        result = __i915_request_has_started(rq) && i915_request_is_active(rq);
        rcu_read_unlock();

        return result;
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
        return !list_empty(&rq->sched.link);
}

static inline bool __i915_request_is_complete(const struct i915_request *rq)
{
        return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
        bool result;

        if (i915_request_signaled(rq))
                return true;

        result = true;
        rcu_read_lock(); /* the HWSP may be freed at runtime */
        if (likely(!i915_request_signaled(rq)))
                result = __i915_request_is_complete(rq);
        rcu_read_unlock();

        return result;
}

static inline void i915_request_mark_complete(struct i915_request *rq)
{
        WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
                   (u32 *)&rq->fence.seqno);
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
        return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
        /* Preemption should only be disabled very rarely */
        return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
        return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
        return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
        set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
        clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
        /* Valid only while the request is being constructed (or retired). */
        return rcu_dereference_protected(rq->timeline,
                lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
                test_bit(CONTEXT_IS_PARKING, &rq->context->flags));
}

static inline struct i915_gem_context *
i915_request_gem_context(const struct i915_request *rq)
{
        /* Valid only while the request is being constructed (or retired). */
        return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(const struct i915_request *rq)
{
        /*
         * When in use during submission, we are protected by a guarantee that
         * the context/timeline is pinned and must remain pinned until after
         * this submission.
         */
        return rcu_dereference_protected(rq->timeline,
                lockdep_is_held(&rq->engine->sched_engine->lock));
}

static inline u32
i915_request_active_seqno(const struct i915_request *rq)
{
        u32 hwsp_phys_base =
                page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
        u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);

        /*
         * Because of wraparound, we cannot simply take tl->hwsp_offset,
         * but instead use the fact that the page-relative offset of the
         * vaddr is the same as the page-relative part of hwsp_offset.
         * Take the top bits from tl->hwsp_offset and combine them with
         * the relative offset in rq->hwsp_seqno.
         *
         * As rq->hwsp_seqno is rewritten when signaled, this only works
         * when the request isn't signaled yet, but at that point you
         * no longer need the offset.
         */

        return hwsp_phys_base + hwsp_relative_offset;
}
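
/*
 * Worked example (illustrative numbers): if tl->hwsp_offset is 0x00321030
 * and rq->hwsp_seqno points at page offset 0x30 within the mapped HWSP,
 * then page_mask_bits() yields 0x00321000, offset_in_page() yields 0x30,
 * and the active seqno address is 0x00321000 + 0x30 == 0x00321030.
 */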

bool
i915_request_active_engine(struct i915_request *rq,
                           struct intel_engine_cs **active);

void i915_request_notify_execute_cb_imm(struct i915_request *rq);

enum i915_request_state {
        I915_REQUEST_UNKNOWN = 0,
        I915_REQUEST_COMPLETE,
        I915_REQUEST_PENDING,
        I915_REQUEST_QUEUED,
        I915_REQUEST_ACTIVE,
};

enum i915_request_state i915_test_request_state(struct i915_request *rq);
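
/*
 * Example (illustrative; exact call sites are a guess): condensing the
 * flag and breadcrumb checks above into a single snapshot, e.g. when
 * dumping engine state:
 *
 *      switch (i915_test_request_state(rq)) {
 *      case I915_REQUEST_COMPLETE:
 *              ... finished, safe to retire ...
 *              break;
 *      case I915_REQUEST_ACTIVE:
 *              ... currently executing on an engine ...
 *              break;
 *      default:
 *              break;
 *      }
 */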

void i915_request_module_exit(void);
int i915_request_module_init(void);

#endif /* I915_REQUEST_H */