/*
 * SPDX-License-Identifier: MIT
 *
 * (C) Copyright 2016 Intel Corporation
 */

#include <linux/slab.h>
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
#include <linux/dma-resv.h>

#include "i915_sw_fence.h"
#include "i915_selftest.h"

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define I915_SW_FENCE_BUG_ON(expr) BUG_ON(expr)
#else
#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
static DEFINE_SPINLOCK(i915_sw_fence_lock);
#endif

#define WQ_FLAG_BITS \
	BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags))

/* after WQ_FLAG_* for safety */
#define I915_SW_FENCE_FLAG_FENCE BIT(WQ_FLAG_BITS - 1)
#define I915_SW_FENCE_FLAG_ALLOC BIT(WQ_FLAG_BITS - 2)

enum {
	DEBUG_FENCE_IDLE = 0,
	DEBUG_FENCE_NOTIFY,
};

static void *i915_sw_fence_debug_hint(void *addr)
{
	return (void *)(((struct i915_sw_fence *)addr)->fn);
}

#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS

static const struct debug_obj_descr i915_sw_fence_debug_descr = {
	.name = "i915_sw_fence",
	.debug_hint = i915_sw_fence_debug_hint,
};

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
	debug_object_init(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
	debug_object_activate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
	debug_object_active_state(fence, &i915_sw_fence_debug_descr, old, new);
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
	debug_object_deactivate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_free(struct i915_sw_fence *fence)
{
	debug_object_free(fence, &i915_sw_fence_debug_descr);
	smp_wmb(); /* flush the change in state before reallocation */
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
	debug_object_assert_init(fence, &i915_sw_fence_debug_descr);
}

#else

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_free(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
}

#endif

static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
				  enum i915_sw_fence_notify state)
{
	return fence->fn(fence, state);
}

#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
void i915_sw_fence_fini(struct i915_sw_fence *fence)
{
	debug_fence_free(fence);
}
#endif

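/*
 * Wake everyone waiting on @fence. Entries that belong to chained
 * i915_sw_fences (marked I915_SW_FENCE_FLAG_FENCE) are passed fence->error
 * as their wake_flags so errors propagate along the chain, and any fences
 * they in turn make ready are queued onto a local list instead of being
 * woken recursively.
 */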
static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
					struct list_head *continuation)
{
	wait_queue_head_t *x = &fence->wait;
	wait_queue_entry_t *pos, *next;
	unsigned long flags;

	debug_fence_deactivate(fence);
	atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */

	/*
	 * To prevent unbounded recursion as we traverse the graph of
	 * i915_sw_fences, we move the entry list from this, the next ready
	 * fence, to the tail of the original fence's entry list
	 * (and so added to the list to be woken).
	 */

	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
	if (continuation) {
		list_for_each_entry_safe(pos, next, &x->head, entry) {
			if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
				list_move_tail(&pos->entry, continuation);
			else
				pos->func(pos, TASK_NORMAL, 0, continuation);
		}
	} else {
		LIST_HEAD(extra);

		do {
			list_for_each_entry_safe(pos, next, &x->head, entry) {
				int wake_flags;

				wake_flags = 0;
				if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
					wake_flags = fence->error;

				pos->func(pos, TASK_NORMAL, wake_flags, &extra);
			}

			if (list_empty(&extra))
				break;

			list_splice_tail_init(&extra, &x->head);
		} while (1);
	}
	spin_unlock_irqrestore(&x->lock, flags);

	debug_fence_assert(fence);
}

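/*
 * Release one pending count. The final release marks the fence as done:
 * the owner is notified with FENCE_COMPLETE and, if that callback returns
 * NOTIFY_DONE, all waiters are woken and FENCE_FREE is sent so the owner
 * may release the fence's storage.
 */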
static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
				     struct list_head *continuation)
{
	debug_fence_assert(fence);

	if (!atomic_dec_and_test(&fence->pending))
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_IDLE, DEBUG_FENCE_NOTIFY);

	if (__i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_NOTIFY, DEBUG_FENCE_IDLE);

	__i915_sw_fence_wake_up_all(fence, continuation);

	debug_fence_destroy(fence);
	__i915_sw_fence_notify(fence, FENCE_FREE);
}

void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
	debug_fence_assert(fence);

	if (WARN_ON(i915_sw_fence_done(fence)))
		return;

	__i915_sw_fence_complete(fence, NULL);
}

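/*
 * Take an additional reference on the fence's pending count so that a new
 * signaler can be attached. Returns false if the fence has already been
 * signaled (pending has dropped below 1), in which case no await is taken.
 */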
bool i915_sw_fence_await(struct i915_sw_fence *fence)
{
	int pending;

	/*
	 * It is only safe to add a new await to the fence while it has
	 * not yet been signaled (i.e. there are still existing signalers).
	 */
	pending = atomic_read(&fence->pending);
	do {
		if (pending < 1)
			return false;
	} while (!atomic_try_cmpxchg(&fence->pending, &pending, pending + 1));

	return true;
}

void __i915_sw_fence_init(struct i915_sw_fence *fence,
			  i915_sw_fence_notify_t fn,
			  const char *name,
			  struct lock_class_key *key)
{
	__init_waitqueue_head(&fence->wait, name, key);
	fence->fn = fn;
#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
	fence->flags = 0;
#endif

	i915_sw_fence_reinit(fence);
}

void i915_sw_fence_reinit(struct i915_sw_fence *fence)
{
	debug_fence_init(fence);

	atomic_set(&fence->pending, 1);
	fence->error = 0;

	I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
}

void i915_sw_fence_commit(struct i915_sw_fence *fence)
{
	debug_fence_activate(fence);
	i915_sw_fence_complete(fence);
}

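/*
 * Wait-queue callback invoked when a signaler completes: @flags carries
 * the signaler's error (see __i915_sw_fence_wake_up_all), which is latched
 * into the waiting fence before one of its pending counts is released.
 * Entries allocated on the waiter's behalf (I915_SW_FENCE_FLAG_ALLOC) are
 * freed here.
 */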
static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
{
	i915_sw_fence_set_error_once(wq->private, flags);

	list_del(&wq->entry);
	__i915_sw_fence_complete(wq->private, key);

	if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
		kfree(wq);
	return 0;
}

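/*
 * With CONFIG_DRM_I915_SW_FENCE_CHECK_DAG enabled, walk the graph of
 * fences depth-first before adding a new dependency and reject it if
 * @signaler is already reachable from @fence (i.e. the edge would create a
 * cycle). The CHECKED bit marks visited fences; a global spinlock
 * serialises the walk.
 */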
#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					   const struct i915_sw_fence * const signaler)
{
	wait_queue_entry_t *wq;

	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return false;

	if (fence == signaler)
		return true;

	list_for_each_entry(wq, &fence->wait.head, entry) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		if (__i915_sw_fence_check_if_after(wq->private, signaler))
			return true;
	}

	return false;
}

static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
{
	wait_queue_entry_t *wq;

	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return;

	list_for_each_entry(wq, &fence->wait.head, entry) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		__i915_sw_fence_clear_checked_bit(wq->private);
	}
}

static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					 const struct i915_sw_fence * const signaler)
{
	unsigned long flags;
	bool err;

	spin_lock_irqsave(&i915_sw_fence_lock, flags);
	err = __i915_sw_fence_check_if_after(fence, signaler);
	__i915_sw_fence_clear_checked_bit(fence);
	spin_unlock_irqrestore(&i915_sw_fence_lock, flags);

	return err;
}
#else
static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					 const struct i915_sw_fence * const signaler)
{
	return false;
}
#endif

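/*
 * Queue @fence to be completed after @signaler. Returns 0 if @signaler was
 * already done (only its error is propagated), 1 if a wait was queued,
 * -EINVAL if the dependency would create a cycle, or -ENOMEM if no wait
 * entry was supplied and one could not be allocated (after falling back to
 * a blocking wait when @gfp allows it).
 */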
static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
					  struct i915_sw_fence *signaler,
					  wait_queue_entry_t *wq, gfp_t gfp)
{
	unsigned int pending;
	unsigned long flags;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (i915_sw_fence_done(signaler)) {
		i915_sw_fence_set_error_once(fence, signaler->error);
		return 0;
	}

	debug_fence_assert(signaler);

	/* The dependency graph must be acyclic. */
	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
		return -EINVAL;

	pending = I915_SW_FENCE_FLAG_FENCE;
	if (!wq) {
		wq = kmalloc(sizeof(*wq), gfp);
		if (!wq) {
			if (!gfpflags_allow_blocking(gfp))
				return -ENOMEM;

			i915_sw_fence_wait(signaler);
			i915_sw_fence_set_error_once(fence, signaler->error);
			return 0;
		}

		pending |= I915_SW_FENCE_FLAG_ALLOC;
	}

	INIT_LIST_HEAD(&wq->entry);
	wq->flags = pending;
	wq->func = i915_sw_fence_wake;
	wq->private = fence;

	i915_sw_fence_await(fence);

	spin_lock_irqsave(&signaler->wait.lock, flags);
	if (likely(!i915_sw_fence_done(signaler))) {
		__add_wait_queue_entry_tail(&signaler->wait, wq);
		pending = 1;
	} else {
		i915_sw_fence_wake(wq, 0, signaler->error, NULL);
		pending = 0;
	}
	spin_unlock_irqrestore(&signaler->wait.lock, flags);

	return pending;
}

int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
				 struct i915_sw_fence *signaler,
				 wait_queue_entry_t *wq)
{
	return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
}

int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
				     struct i915_sw_fence *signaler,
				     gfp_t gfp)
{
	return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
}

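/*
 * Extended dma-fence callback used when a timeout is requested: a timer
 * acts as a watchdog on the dma-fence wait, with irq_work and RCU
 * deferring teardown out of the hardirq contexts the timer and callback
 * may run in.
 */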
struct i915_sw_dma_fence_cb_timer {
	struct i915_sw_dma_fence_cb base;
	struct dma_fence *dma;
	struct timer_list timer;
	struct irq_work work;
	struct rcu_head rcu;
};

static void dma_i915_sw_fence_wake(struct dma_fence *dma,
				   struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	i915_sw_fence_set_error_once(cb->fence, dma->error);
	i915_sw_fence_complete(cb->fence);
	kfree(cb);
}

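/*
 * Watchdog expired before the dma-fence signaled: claim the fence with
 * xchg() (racing against dma_i915_sw_fence_wake_timer) and complete it
 * with -ETIMEDOUT, leaving a hint in the log about who was waiting.
 */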
static void timer_i915_sw_fence_wake(struct timer_list *t)
{
	struct i915_sw_dma_fence_cb_timer *cb = from_timer(cb, t, timer);
	struct i915_sw_fence *fence;

	fence = xchg(&cb->base.fence, NULL);
	if (!fence)
		return;

	pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%ps)\n",
		  cb->dma->ops->get_driver_name(cb->dma),
		  cb->dma->ops->get_timeline_name(cb->dma),
		  cb->dma->seqno,
		  i915_sw_fence_debug_hint(fence));

	i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
	i915_sw_fence_complete(fence);
}

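/*
 * The dma-fence signaled: complete the i915_sw_fence unless the watchdog
 * got there first, then defer timer shutdown and the final dma_fence_put()
 * to irq_work since this callback may itself run in hardirq context.
 */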
static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma,
					 struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(data, typeof(*cb), base.base);
	struct i915_sw_fence *fence;

	fence = xchg(&cb->base.fence, NULL);
	if (fence) {
		i915_sw_fence_set_error_once(fence, dma->error);
		i915_sw_fence_complete(fence);
	}

	irq_work_queue(&cb->work);
}

static void irq_i915_sw_fence_work(struct irq_work *wrk)
{
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(wrk, typeof(*cb), work);

	timer_shutdown_sync(&cb->timer);
	dma_fence_put(cb->dma);

	kfree_rcu(cb, rcu);
}

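/*
 * Queue @fence to be completed when @dma signals. Returns 0 if @dma is
 * already signaled (its error is propagated), 1 if a callback was
 * installed, or -ENOMEM if the callback could not be allocated and @gfp
 * forbids blocking. A non-zero @timeout arms the watchdog timer above.
 */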
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				  struct dma_fence *dma,
				  unsigned long timeout,
				  gfp_t gfp)
{
	struct i915_sw_dma_fence_cb *cb;
	dma_fence_func_t func;
	int ret;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (dma_fence_is_signaled(dma)) {
		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb = kmalloc(timeout ?
		     sizeof(struct i915_sw_dma_fence_cb_timer) :
		     sizeof(struct i915_sw_dma_fence_cb),
		     gfp);
	if (!cb) {
		if (!gfpflags_allow_blocking(gfp))
			return -ENOMEM;

		ret = dma_fence_wait(dma, false);
		if (ret)
			return ret;

		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb->fence = fence;
	i915_sw_fence_await(fence);

	func = dma_i915_sw_fence_wake;
	if (timeout) {
		struct i915_sw_dma_fence_cb_timer *timer =
			container_of(cb, typeof(*timer), base);

		timer->dma = dma_fence_get(dma);
		init_irq_work(&timer->work, irq_i915_sw_fence_work);

		timer_setup(&timer->timer,
			    timer_i915_sw_fence_wake, TIMER_IRQSAFE);
		mod_timer(&timer->timer, round_jiffies_up(jiffies + timeout));

		func = dma_i915_sw_fence_wake_timer;
	}

	ret = dma_fence_add_callback(dma, &cb->base, func);
	if (ret == 0) {
		ret = 1;
	} else {
		func(dma, &cb->base);
		if (ret == -ENOENT) /* fence already signaled */
			ret = 0;
	}

	return ret;
}

static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
				     struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	i915_sw_fence_set_error_once(cb->fence, dma->error);
	i915_sw_fence_complete(cb->fence);
}

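/*
 * As i915_sw_fence_await_dma_fence(), but with a caller-provided callback
 * so no allocation (and hence no failure path or timeout watchdog) is
 * needed. Returns 1 if the callback was installed, 0 if @dma had already
 * signaled.
 */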
int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				    struct dma_fence *dma,
				    struct i915_sw_dma_fence_cb *cb)
{
	int ret;

	debug_fence_assert(fence);

	if (dma_fence_is_signaled(dma)) {
		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb->fence = fence;
	i915_sw_fence_await(fence);

	ret = 1;
	if (dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake)) {
		/* fence already signaled */
		__dma_i915_sw_fence_wake(dma, &cb->base);
		ret = 0;
	}

	return ret;
}

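/*
 * Wait on the fences tracked by a reservation object: for a write we must
 * wait for all readers and writers, for a read only for writers, as
 * selected by dma_resv_usage_rw(). Returns a negative error, a positive
 * value if any wait was queued, or zero if everything had already
 * signaled.
 */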
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
				    struct dma_resv *resv,
				    bool write,
				    unsigned long timeout,
				    gfp_t gfp)
{
	struct dma_resv_iter cursor;
	struct dma_fence *f;
	int ret = 0, pending;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(write));
	dma_resv_for_each_fence_unlocked(&cursor, f) {
		pending = i915_sw_fence_await_dma_fence(fence, f, timeout,
							gfp);
		if (pending < 0) {
			ret = pending;
			break;
		}

		ret |= pending;
	}
	dma_resv_iter_end(&cursor);
	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/lib_sw_fence.c"
#include "selftests/i915_sw_fence.c"
#endif