/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"

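/*
 * task_asleep() - advisory check that a task has entered a sleeping state
 * (TASK_NORMAL) and, on SMP, is no longer executing on a CPU. It is not
 * ordered against the wakeup path, so callers treat the result as a hint.
 */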
#ifdef CONFIG_SMP
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
#else
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
#endif

static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
	struct intel_wait *wait;
	unsigned int result = 0;

	lockdep_assert_held(&b->irq_lock);

	wait = b->irq_wait;
	if (wait) {
		/*
		 * N.B. Since task_asleep() and ttwu are not atomic, the
		 * waiter may actually go to sleep after the check, causing
		 * us to suppress a valid wakeup. We prefer to reduce the
		 * number of false positive missed_breadcrumb() warnings
		 * at the expense of a few false negatives, as it is easy
		 * to trigger a false positive under heavy load. Enough
		 * signal should remain from genuine missed_breadcrumb()
		 * for us to detect in CI.
		 */
		bool was_asleep = task_asleep(wait->tsk);

		result = ENGINE_WAKEUP_WAITER;
		if (wake_up_process(wait->tsk) && was_asleep)
			result |= ENGINE_WAKEUP_ASLEEP;
	}

	return result;
}

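/*
 * intel_engine_wakeup() - wake the current bottom-half waiter, if any.
 * Returns ENGINE_WAKEUP_WAITER if a waiter was installed, plus
 * ENGINE_WAKEUP_ASLEEP if that waiter appeared to be asleep at the time
 * (subject to the caveat noted in __intel_breadcrumbs_wakeup()).
 */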
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;
	unsigned int result;

	spin_lock_irqsave(&b->irq_lock, flags);
	result = __intel_breadcrumbs_wakeup(b);
	spin_unlock_irqrestore(&b->irq_lock, flags);

	return result;
}

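/* Next hangcheck sampling point, rounded up to a whole jiffy. */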
static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

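/*
 * Record that we believe a breadcrumb (user interrupt) has gone missing:
 * dump the engine state when driver debugging is enabled and flag the
 * engine so that the fake-irq fallback may be considered from now on.
 */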
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
	if (drm_debug & DRM_UT_DRIVER) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p,
				  "%s missed breadcrumb at %pS\n",
				  engine->name, __builtin_return_address(0));
	}

	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

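/*
 * Timer callback: while the breadcrumb irq is armed, periodically verify
 * that interrupts are still being delivered. If none have arrived within
 * the hangcheck interval and the waiter is asleep, report a missed
 * breadcrumb and fall back to the fake-irq timer.
 */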
static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{
	struct intel_engine_cs *engine =
		from_timer(engine, t, breadcrumbs.hangcheck);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!b->irq_armed)
		return;

	if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
		b->hangcheck_interrupts = atomic_read(&engine->irq_count);
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	/* We keep the hangcheck timer alive until we disarm the irq, even
	 * if there are no waiters at present.
	 *
	 * If the waiter was currently running, assume it hasn't had a chance
	 * to process the pending interrupt (e.g., low priority task on a
	 * loaded system) and wait until it sleeps before declaring a missed
	 * interrupt.
	 *
	 * If the waiter was asleep (and not even pending a wakeup), then we
	 * must have missed an interrupt as the GPU has stopped advancing
	 * but we still have a waiter. Assuming all batches complete within
	 * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
		missed_breadcrumb(engine);
		mod_timer(&b->fake_irq, jiffies + 1);
	} else {
		mod_timer(&b->hangcheck, wait_timeout());
	}
}

static void intel_breadcrumbs_fake_irq(struct timer_list *t)
{
	struct intel_engine_cs *engine = from_timer(engine, t,
						    breadcrumbs.fake_irq);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
	 * Here the worker will wake up every jiffie in order to kick the
	 * oldest waiter to do the coherent seqno check.
	 */

	spin_lock_irq(&b->irq_lock);
	if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock_irq(&b->irq_lock);
	if (!b->irq_armed)
		return;

	mod_timer(&b->fake_irq, jiffies + 1);
}

static void irq_enable(struct intel_engine_cs *engine)
{
	/*
	 * FIXME: Ideally we want this on the API boundary, but for the
	 * sake of testing with mock breadcrumbs (no HW so unable to
	 * enable irqs) we place it deep within the bowels, at the point
	 * of no return.
	 */
	GEM_BUG_ON(!intel_irqs_enabled(engine->i915));

	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* Caller disables interrupts */
	if (engine->irq_enable) {
		spin_lock(&engine->i915->irq_lock);
		engine->irq_enable(engine);
		spin_unlock(&engine->i915->irq_lock);
	}
}

static void irq_disable(struct intel_engine_cs *engine)
{
	/* Caller disables interrupts */
	if (engine->irq_disable) {
		spin_lock(&engine->i915->irq_lock);
		engine->irq_disable(engine);
		spin_unlock(&engine->i915->irq_lock);
	}
}

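/*
 * Drop the irq reference taken when the breadcrumb interrupt was armed.
 * Caller must hold b->irq_lock, and no waiter may still be installed.
 */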
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->irq_lock);
	GEM_BUG_ON(b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);

	GEM_BUG_ON(!b->irq_enabled);
	if (!--b->irq_enabled)
		irq_disable(engine);

	b->irq_armed = false;
}

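/*
 * Pin/unpin an extra reference on the engine's user interrupt so that it
 * remains enabled independently of any transient breadcrumb waiter.
 */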
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	if (!b->irq_enabled++)
		irq_enable(engine);
	GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
	spin_unlock_irq(&b->irq_lock);
}

void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
	if (!--b->irq_enabled)
		irq_disable(engine);
	spin_unlock_irq(&b->irq_lock);
}

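/*
 * Disarm the breadcrumb interrupt entirely and wake every remaining waiter;
 * called when the engine is idled with all requests completed.
 */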
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait, *n;

	if (!b->irq_armed)
		return;

	/*
	 * We only disarm the irq when we are idle (all requests completed),
	 * so if the bottom-half remains asleep, it missed the request
	 * completion.
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
		missed_breadcrumb(engine);

	spin_lock_irq(&b->rb_lock);

	spin_lock(&b->irq_lock);
	b->irq_wait = NULL;
	if (b->irq_armed)
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock(&b->irq_lock);

	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
		GEM_BUG_ON(!i915_seqno_passed(intel_engine_get_seqno(engine),
					      wait->seqno));
		RB_CLEAR_NODE(&wait->node);
		wake_up_process(wait->tsk);
	}
	b->waiters = RB_ROOT;

	spin_unlock_irq(&b->rb_lock);
}

static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
	const struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
		return false;

	/* Only start with the heavy weight fake irq timer if we have not
	 * seen any interrupts since enabling it the first time. If the
	 * interrupts are still arriving, it means we made a mistake in our
	 * engine->seqno_barrier(), a timing error that should be transient
	 * and unlikely to reoccur.
	 */
	return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
}

static void enable_fake_irq(struct intel_breadcrumbs *b)
{
	/* Ensure we never sleep indefinitely */
	if (!b->irq_enabled || use_fake_irq(b))
		mod_timer(&b->fake_irq, jiffies + 1);
	else
		mod_timer(&b->hangcheck, wait_timeout());
}

static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;
	bool enabled;

	lockdep_assert_held(&b->irq_lock);
	if (b->irq_armed)
		return false;

	/* The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	b->irq_armed = true;

	if (I915_SELFTEST_ONLY(b->mock)) {
		/* For our mock objects we want to avoid interaction
		 * with the real hardware (which is not set up). So
		 * we simply pretend we have enabled the powerwell
		 * and the irq, and leave it up to the mock
		 * implementation to call intel_engine_wakeup()
		 * itself when it wants to simulate a user interrupt.
		 */
		return true;
	}

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked by
	 * i915->gt.awake, so we can forgo holding our own wakref for
	 * the interrupt: before i915->gt.awake is released (when the
	 * driver is idle) we disarm the breadcrumbs.
	 */

	/* No interrupts? Kick the waiter every jiffie! */
	enabled = false;
	if (!b->irq_enabled++ &&
	    !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
		irq_enable(engine);
		enabled = true;
	}

	enable_fake_irq(b);
	return enabled;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	lockdep_assert_held(&b->rb_lock);
	GEM_BUG_ON(b->irq_wait == wait);

	/*
	 * This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task. N.B. when the
	 * task wakes up, it will find the empty rb_node, discern that it
	 * has already been removed from the tree and skip the serialisation
	 * of the b->rb_lock and b->irq_lock. This means that the destruction
	 * of the intel_wait is not serialised with the interrupt handler
	 * by the waiter - it must instead be serialised by the caller.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	if (wait->tsk->state != TASK_RUNNING)
		wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
					    struct rb_node *next)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock(&b->irq_lock);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(!b->irq_wait);
	b->irq_wait = to_wait(next);
	spin_unlock(&b->irq_lock);

	/* We always wake up the next waiter that takes over as the bottom-half
	 * as we may delegate not only the irq-seqno barrier to the next waiter
	 * but also the task of waking up concurrent waiters.
	 */
	if (next)
		wake_up_process(to_wait(next)->tsk);
}

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first, armed;
	u32 seqno;

	GEM_BUG_ON(!wait->seqno);

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold
	 * the spinlock we know that the first_waiter must be delayed, and
	 * we can reduce some of the sequential wake up latency if we take
	 * action ourselves and wake up the completed tasks in parallel.
	 * Also, by removing stale elements in the tree, we may be able to
	 * reduce the ping-pong between the old bottom-half and ourselves
	 * as first-waiter.
	 */
	armed = false;
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);

	if (first) {
		spin_lock(&b->irq_lock);
		b->irq_wait = wait;
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		armed = __intel_breadcrumbs_enable_irq(b);
		spin_unlock(&b->irq_lock);
	}

	if (completed) {
		/* Advance the bottom-half (b->irq_wait) before we wake up
		 * the waiters who may scribble over their intel_wait
		 * just as the interrupt handler is dereferencing it via
		 * b->irq_wait.
		 */
		if (!first) {
			struct rb_node *next = rb_next(completed);
			GEM_BUG_ON(next == &wait->node);
			__intel_breadcrumbs_next(engine, next);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	GEM_BUG_ON(!b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);

	return armed;
}

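/*
 * intel_engine_add_wait() - add a waiter for a breadcrumb on this engine.
 * Returns true if the caller should perform its own coherent seqno check:
 * either because we have just armed the interrupt (and may have raced with
 * it), or because the request appears to have started executing already.
 */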
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool armed;

	spin_lock_irq(&b->rb_lock);
	armed = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
	if (armed)
		return armed;

	/* Make the caller recheck if its request has already started. */
	return i915_seqno_passed(intel_engine_get_seqno(engine),
				 wait->seqno - 1);
}

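/*
 * When the current bottom-half departs, it may wake the next, already
 * completed, waiters on their behalf. Chaining those wakeups adds latency
 * to the departing task, so it is only done for waiters of equal or higher
 * priority (a lower tsk->prio value); the signaler thread is treated as
 * the highest priority (INT_MIN) and never walks the chain itself.
 */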
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}

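/*
 * Remove a waiter from the rbtree and, if it was the bottom-half, promote
 * the next waiter (waking any intervening completed waiters as allowed by
 * chain_wakeup()). Caller must hold b->rb_lock.
 */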
static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
				       struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->rb_lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out;

	if (b->irq_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in
			 * parallel rather than sequentially, which should
			 * reduce the overall latency in waking all the
			 * completed clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		__intel_breadcrumbs_next(engine, next);
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

out:
	GEM_BUG_ON(b->irq_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->irq_wait ? &b->irq_wait->node : NULL));
}

void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node)) {
		GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
		return;
	}

	spin_lock_irq(&b->rb_lock);
	__intel_engine_remove_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
}

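/*
 * Run the signaler thread with realtime (SCHED_FIFO) priority to minimise
 * the latency between the user interrupt and dma_fence_signal().
 */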
static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

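/*
 * Per-engine kernel thread acting as a common bottom-half for fence
 * signaling: it waits for breadcrumbs on behalf of every request that has
 * signaling enabled and calls dma_fence_signal() in seqno order.
 */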
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct i915_request *rq, *n;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		bool do_schedule = true;
		LIST_HEAD(list);
		u32 seqno;

		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&b->signals))
			goto sleep;

		/*
		 * We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		seqno = intel_engine_get_seqno(engine);

		spin_lock_irq(&b->rb_lock);
		list_for_each_entry_safe(rq, n, &b->signals, signaling.link) {
			u32 this = rq->signaling.wait.seqno;

			GEM_BUG_ON(!rq->signaling.wait.seqno);

			if (!i915_seqno_passed(seqno, this))
				break;

			if (likely(this == i915_request_global_seqno(rq))) {
				__intel_engine_remove_wait(engine,
							   &rq->signaling.wait);

				rq->signaling.wait.seqno = 0;
				__list_del_entry(&rq->signaling.link);

				if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
					      &rq->fence.flags)) {
					list_add_tail(&rq->signaling.link,
						      &list);
					i915_request_get(rq);
				}
			}
		}
		spin_unlock_irq(&b->rb_lock);

		if (!list_empty(&list)) {
			local_bh_disable();
			list_for_each_entry_safe(rq, n, &list, signaling.link) {
				dma_fence_signal(&rq->fence);
				GEM_BUG_ON(!i915_request_completed(rq));
				i915_request_put(rq);
			}
			local_bh_enable(); /* kick start the tasklets */

			/*
			 * If the engine is saturated we may be continually
			 * processing completed requests. This angers the
			 * NMI watchdog if we never let anything else
			 * have access to the CPU. Let's pretend to be nice
			 * and relinquish the CPU if we burn through the
			 * entire RT timeslice!
			 */
			do_schedule = need_resched();
		}

		if (unlikely(do_schedule)) {
			/* Before we sleep, check for a missed seqno */
			if (current->state & TASK_NORMAL &&
			    !list_empty(&b->signals) &&
			    engine->irq_seqno_barrier &&
			    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
					       &engine->irq_posted)) {
				engine->irq_seqno_barrier(engine);
				intel_engine_wakeup(engine);
			}

sleep:
			if (kthread_should_park())
				kthread_parkme();

			if (unlikely(kthread_should_stop()))
				break;

			schedule();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}

static void insert_signal(struct intel_breadcrumbs *b,
			  struct i915_request *request,
			  const u32 seqno)
{
	struct i915_request *iter;

	lockdep_assert_held(&b->rb_lock);

	/*
	 * A reasonable assumption is that we are called to add signals
	 * in sequence, as the requests are submitted for execution and
	 * assigned a global_seqno. This will be the case for the majority
	 * of internally generated signals (inter-engine signaling).
	 *
	 * Out of order waiters triggering random signaling enabling will
	 * be more problematic, but hopefully rare enough and the list
	 * small enough that the O(N) insertion sort is not an issue.
	 */

	list_for_each_entry_reverse(iter, &b->signals, signaling.link)
		if (i915_seqno_passed(seqno, iter->signaling.wait.seqno))
			break;

	list_add(&request->signaling.link, &iter->signaling.link);
}

void intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	u32 seqno;

	/*
	 * Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->rb_lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	seqno = i915_request_global_seqno(request);
	if (!seqno) /* will be enabled later upon execution */
		return;

	GEM_BUG_ON(request->signaling.wait.seqno);
	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.request = request;
	request->signaling.wait.seqno = seqno;

	/*
	 * Add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	spin_lock(&b->rb_lock);
	insert_signal(b, request, seqno);
	wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);
	spin_unlock(&b->rb_lock);

	if (wakeup)
		wake_up_process(b->signaler);
}

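/*
 * Undo intel_engine_enable_signaling() for a request whose breadcrumb has
 * not yet been signaled: detach it from the waiter tree and signal list.
 */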
void intel_engine_cancel_signaling(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	if (!READ_ONCE(request->signaling.wait.seqno))
		return;

	spin_lock(&b->rb_lock);
	__intel_engine_remove_wait(engine, &request->signaling.wait);
	if (fetch_and_zero(&request->signaling.wait.seqno))
		__list_del_entry(&request->signaling.link);
	spin_unlock(&b->rb_lock);
}

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->rb_lock);
	spin_lock_init(&b->irq_lock);

	timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
	timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);

	INIT_LIST_HEAD(&b->signals);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}

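/* Stop the fallback timers and forget any record of missed interrupts. */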
static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->hangcheck);
	del_timer_sync(&b->fake_irq);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	cancel_fake_irq(engine);
	spin_lock_irq(&b->irq_lock);

	if (b->irq_enabled)
		irq_enable(engine);
	else
		irq_disable(engine);

	/* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
	 * GPU is active and may have already executed the MI_USER_INTERRUPT
	 * before the CPU is ready to receive. However, the engine is
	 * currently idle (we haven't started it yet), so there is no
	 * possibility of a missed interrupt as we enabled the irq, and we
	 * can clear the immediate wakeup (until a real interrupt arrives
	 * for the waiter).
	 */
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	if (b->irq_armed)
		enable_fake_irq(b);

	spin_unlock_irq(&b->irq_lock);
}

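/*
 * Tear down the breadcrumbs: stop the signaler thread and cancel the
 * fallback timers. The engine must be idle, with no waiters or pending
 * signals remaining.
 */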
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->irq_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(!list_empty(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif