// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/sched/signal.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>

EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);

static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;

/*
 * fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);

/**
 * DOC: DMA fences overview
 *
 * DMA fences, represented by &struct dma_fence, are the kernel internal
 * synchronization primitive for DMA operations like GPU rendering, video
 * encoding/decoding, or displaying buffers on a screen.
 *
 * A fence is initialized using dma_fence_init() and completed using
 * dma_fence_signal(). Fences are associated with a context, allocated through
 * dma_fence_context_alloc(), and all fences on the same context are
 * fully ordered.
 * Since the purpose of fences is to facilitate cross-device and
 * cross-application synchronization, there are multiple ways to use one:
 *
 * - Individual fences can be exposed as a &sync_file, accessed as a file
 *   descriptor from userspace, created by calling sync_file_create(). This is
 *   called explicit fencing, since userspace passes around explicit
 *   synchronization points.
 *
 * - Some subsystems also have their own explicit fencing primitives, like
 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
 *   fence to be updated.
 *
 * - Then there's also implicit fencing, where the synchronization points are
 *   implicitly passed around as part of shared &dma_buf instances. Such
 *   implicit fences are stored in &struct dma_resv through the
 *   &dma_buf.resv pointer.
 */
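
/*
 * As a rough lifecycle sketch (not lifted from any particular driver;
 * my_job, my_fence_ops and ctx are made-up names)::
 *
 *     struct my_job {
 *             struct dma_fence fence;
 *             ...
 *     };
 *
 *     // at submission, on the job's execution context
 *     dma_fence_init(&job->fence, &my_fence_ops, &ctx->lock,
 *                    ctx->context, ++ctx->seqno);
 *
 *     // in the completion interrupt handler or worker
 *     dma_fence_signal(&job->fence);
 *
 * Waiters then either block with dma_fence_wait() or register a callback
 * with dma_fence_add_callback().
 */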

/**
 * DOC: fence cross-driver contract
 *
 * Since &dma_fence provides a cross-driver contract, all drivers must follow
 * the same rules:
 *
 * * Fences must complete in a reasonable time. Fences which represent kernels
 *   and shaders submitted by userspace, which could run forever, must be backed
 *   up by timeout and gpu hang recovery code. Minimally that code must prevent
 *   further command submission and force complete all in-flight fences, e.g.
 *   when the driver or hardware do not support gpu reset, or if the gpu reset
 *   failed for some reason. Ideally the driver supports gpu recovery which only
 *   affects the offending userspace context, and no other userspace
 *   submissions.
 *
 * * Drivers may have different ideas of what completion within a reasonable
 *   time means. Some hang recovery code uses a fixed timeout, others a mix
 *   between observing forward progress and increasingly strict timeouts.
 *   Drivers should not try to second guess timeout handling of fences from
 *   other drivers.
 *
 * * To ensure there's no deadlocks of dma_fence_wait() against other locks
 *   drivers should annotate all code required to reach dma_fence_signal(),
 *   which completes the fences, with dma_fence_begin_signalling() and
 *   dma_fence_end_signalling().
 *
 * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock().
 *   This means any code required for fence completion cannot acquire a
 *   &dma_resv lock. Note that this also pulls in the entire established
 *   locking hierarchy around dma_resv_lock() and dma_resv_unlock().
 *
 * * Drivers are allowed to call dma_fence_wait() from their &shrinker
 *   callbacks. This means any code required for fence completion cannot
 *   allocate memory with GFP_KERNEL.
 * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
 *   or &mmu_interval_notifier callbacks. This means any code required
 *   for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
 *   Only GFP_ATOMIC is permissible, which might fail.
 *
 * Note that only GPU drivers have a reasonable excuse for both requiring
 * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
 * track asynchronous compute work using &dma_fence. No driver outside of
 * drivers/gpu should ever call dma_fence_wait() in such contexts.
 */
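
/*
 * For illustration only: a minimal shape of the timeout/recovery rule above.
 * All names (my_device, my_job, timeout_work, in_flight) are made up::
 *
 *     static void my_timeout_work(struct work_struct *work)
 *     {
 *             struct my_device *dev =
 *                     container_of(work, struct my_device, timeout_work);
 *             struct my_job *job;
 *
 *             // block further submission, then force-complete everything
 *             // in flight so no fence is left pending forever
 *             list_for_each_entry(job, &dev->in_flight, node) {
 *                     dma_fence_set_error(&job->fence, -ETIMEDOUT);
 *                     dma_fence_signal(&job->fence);
 *             }
 *     }
 *
 * Per the rules above, nothing on this path may allocate with GFP_KERNEL,
 * take a &dma_resv lock, or otherwise wait on another &dma_fence.
 */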

static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
        return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
        .get_driver_name = dma_fence_stub_get_name,
        .get_timeline_name = dma_fence_stub_get_name,
};

/**
 * dma_fence_get_stub - return a signaled fence
 *
 * Return a stub fence which is already signaled.
 */
struct dma_fence *dma_fence_get_stub(void)
{
        spin_lock(&dma_fence_stub_lock);
        if (!dma_fence_stub.ops) {
                dma_fence_init(&dma_fence_stub,
                               &dma_fence_stub_ops,
                               &dma_fence_stub_lock,
                               0, 0);
                dma_fence_signal_locked(&dma_fence_stub);
        }
        spin_unlock(&dma_fence_stub_lock);

        return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);
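
/*
 * Usage sketch (hypothetical caller): when an API must hand back a fence but
 * the work has already completed synchronously, the stub can stand in::
 *
 *     if (list_empty(&queue->jobs))
 *             return dma_fence_get_stub();
 *
 * The reference obtained here is still dropped with dma_fence_put().
 */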

/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: amount of contexts to allocate
 *
 * This function will return the first index of the number of fence contexts
 * allocated. The fence context is used for setting &dma_fence.context to a
 * unique number by passing the context to dma_fence_init().
 */
u64 dma_fence_context_alloc(unsigned num)
{
        WARN_ON(!num);
        return atomic64_fetch_add(num, &dma_fence_context_counter);
}
EXPORT_SYMBOL(dma_fence_context_alloc);
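
/*
 * A driver with several independent engines would typically allocate one
 * context per engine at init time; a hedged sketch, with my_dev made up::
 *
 *     u64 base = dma_fence_context_alloc(my_dev->num_engines);
 *
 *     for (i = 0; i < my_dev->num_engines; i++)
 *             my_dev->engine[i].fence_context = base + i;
 *
 * Fences initialized with the same context value are totally ordered by
 * their seqno, which is what comparisons like dma_fence_later() rely on.
 */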

/**
 * DOC: fence signalling annotation
 *
 * Proving correctness of all the kernel code around &dma_fence through code
 * review and testing is tricky for a few reasons:
 *
 * * It is a cross-driver contract, and therefore all drivers must follow the
 *   same rules for lock nesting order, calling contexts for various functions
 *   and anything else significant for in-kernel interfaces. But it is also
 *   impossible to test all drivers in a single machine, hence brute-force N vs.
 *   N testing of all combinations is impossible. Even just limiting to the
 *   possible combinations is infeasible.
 *
 * * There is an enormous amount of driver code involved. For render drivers
 *   there's the tail of command submission, after fences are published,
 *   scheduler code, interrupt and workers to process job completion,
 *   and timeout, gpu reset and gpu hang recovery code. Plus for integration
 *   with the core mm we have &mmu_notifier and &mmu_interval_notifier,
 *   and &shrinker. For modesetting drivers there's the commit tail functions
 *   between when fences for an atomic modeset are published, and when the
 *   corresponding vblank completes, including any interrupt processing and
 *   related workers. Auditing all that code, across all drivers, is not
 *   feasible.
 *
 * * Due to how many other subsystems are involved and the locking hierarchies
 *   this pulls in there is extremely thin wiggle-room for driver-specific
 *   differences. &dma_fence interacts with almost all of the core memory
 *   handling through page fault handlers via &dma_resv, dma_resv_lock() and
 *   dma_resv_unlock(). On the other side it also interacts through all
 *   allocation sites through &mmu_notifier and &shrinker.
 *
 * Furthermore lockdep does not handle cross-release dependencies, which means
 * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
 * at runtime with some quick testing. The simplest example is one thread
 * waiting on a &dma_fence while holding a lock::
 *
 *     lock(A);
 *     dma_fence_wait(B);
 *     unlock(A);
 *
 * while the other thread is stuck trying to acquire the same lock, which
 * prevents it from signalling the fence the previous thread is stuck waiting
 * on::
 *
 *     lock(A);
 *     unlock(A);
 *     dma_fence_signal(B);
 *
 * By manually annotating all code relevant to signalling a &dma_fence we can
 * teach lockdep about these dependencies, which also helps with the validation
 * headache since now lockdep can check all the rules for us::
 *
 *     cookie = dma_fence_begin_signalling();
 *     lock(A);
 *     unlock(A);
 *     dma_fence_signal(B);
 *     dma_fence_end_signalling(cookie);
 *
 * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
 * annotate critical sections the following rules need to be observed:
 *
 * * All code necessary to complete a &dma_fence must be annotated, from the
 *   point where a fence is accessible to other threads, to the point where
 *   dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
 *   and due to the very strict rules and many corner cases it is infeasible to
 *   catch these just with review or normal stress testing.
 *
 * * &struct dma_resv deserves a special note, since the readers are only
 *   protected by rcu. This means the signalling critical section starts as soon
 *   as the new fences are installed, even before dma_resv_unlock() is called.
 *
 * * The only exceptions are fast paths and opportunistic signalling code, which
 *   calls dma_fence_signal() purely as an optimization, but is not required to
 *   guarantee completion of a &dma_fence. The usual example is a wait IOCTL
 *   which calls dma_fence_signal(), while the mandatory completion path goes
 *   through a hardware interrupt and possible job completion worker.
 *
 * * To aid composability of code, the annotations can be freely nested, as long
 *   as the overall locking hierarchy is consistent. The annotations also work
 *   both in interrupt and process context. Due to implementation details this
 *   requires that callers pass an opaque cookie from
 *   dma_fence_begin_signalling() to dma_fence_end_signalling().
 *
 * * Validation against the cross driver contract is implemented by priming
 *   lockdep with the relevant hierarchy at boot-up. This means even just
 *   testing with a single device is enough to validate a driver, at least as
 *   far as deadlocks with dma_fence_wait() against dma_fence_signal() are
 *   concerned.
 */
#ifdef CONFIG_LOCKDEP
static struct lockdep_map dma_fence_lockdep_map = {
        .name = "dma_fence_map"
};

/**
 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
 *
 * Drivers should use this to annotate the beginning of any code section
 * required to eventually complete &dma_fence by calling dma_fence_signal().
 *
 * The end of these critical sections are annotated with
 * dma_fence_end_signalling().
 *
 * Returns:
 *
 * Opaque cookie needed by the implementation, which needs to be passed to
 * dma_fence_end_signalling().
 */
bool dma_fence_begin_signalling(void)
{
        /* explicitly nesting ... */
        if (lock_is_held_type(&dma_fence_lockdep_map, 1))
                return true;

        /* rely on might_sleep check for soft/hardirq locks */
        if (in_atomic())
                return true;

        /* ... and non-recursive readlock */
        lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);

        return false;
}
EXPORT_SYMBOL(dma_fence_begin_signalling);

/**
 * dma_fence_end_signalling - end a critical DMA fence signalling section
 * @cookie: opaque cookie from dma_fence_begin_signalling()
 *
 * Closes a critical section annotation opened by dma_fence_begin_signalling().
 */
void dma_fence_end_signalling(bool cookie)
{
        if (cookie)
                return;

        lock_release(&dma_fence_lockdep_map, _RET_IP_);
}
EXPORT_SYMBOL(dma_fence_end_signalling);

void __dma_fence_might_wait(void)
{
        bool tmp;

        tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
        if (tmp)
                lock_release(&dma_fence_lockdep_map, _THIS_IP_);
        lock_map_acquire(&dma_fence_lockdep_map);
        lock_map_release(&dma_fence_lockdep_map);
        if (tmp)
                lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
}
#endif


/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
 * held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
        struct dma_fence_cb *cur, *tmp;
        struct list_head cb_list;

        lockdep_assert_held(fence->lock);

        if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                                      &fence->flags)))
                return -EINVAL;

        /* Stash the cb_list before replacing it with the timestamp */
        list_replace(&fence->cb_list, &cb_list);

        fence->timestamp = ktime_get();
        set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
        trace_dma_fence_signaled(fence);

        list_for_each_entry_safe(cur, tmp, &cb_list, node) {
                INIT_LIST_HEAD(&cur->node);
                cur->func(fence, cur);
        }

        return 0;
}
EXPORT_SYMBOL(dma_fence_signal_locked);
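
/*
 * A sketch of where the _locked variant helps (ctx, pending and done_seqno
 * are hypothetical): a timeline that signals every fence up to the last
 * completed seqno while already holding the lock shared by all its fences::
 *
 *     spin_lock_irqsave(&ctx->lock, flags);
 *     list_for_each_entry_safe(f, tmp, &ctx->pending, link) {
 *             if (f->base.seqno > done_seqno)
 *                     break;
 *             dma_fence_signal_locked(&f->base);
 *             list_del(&f->link);
 *     }
 *     spin_unlock_irqrestore(&ctx->lock, flags);
 *
 * This works because every fence on the timeline was initialized with
 * &ctx->lock as its &dma_fence.lock.
 */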

/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal(struct dma_fence *fence)
{
        unsigned long flags;
        int ret;
        bool tmp;

        if (!fence)
                return -EINVAL;

        tmp = dma_fence_begin_signalling();

        spin_lock_irqsave(fence->lock, flags);
        ret = dma_fence_signal_locked(fence);
        spin_unlock_irqrestore(fence->lock, flags);

        dma_fence_end_signalling(tmp);

        return ret;
}
EXPORT_SYMBOL(dma_fence_signal);

/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
        signed long ret;

        if (WARN_ON(timeout < 0))
                return -EINVAL;

        might_sleep();

        __dma_fence_might_wait();

        trace_dma_fence_wait_start(fence);
        if (fence->ops->wait)
                ret = fence->ops->wait(fence, intr, timeout);
        else
                ret = dma_fence_default_wait(fence, intr, timeout);
        trace_dma_fence_wait_end(fence);
        return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);
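
/*
 * Interpreting the return value, as a short usage sketch::
 *
 *     ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *     if (ret == 0)
 *             ...     // timed out, fence still pending
 *     else if (ret < 0)
 *             ...     // -ERESTARTSYS or a driver-specific error
 *     else
 *             ...     // signaled, ret jiffies of the timeout remained
 */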

/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
        struct dma_fence *fence =
                container_of(kref, struct dma_fence, refcount);

        trace_dma_fence_destroy(fence);

        if (WARN(!list_empty(&fence->cb_list) &&
                 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
                 "Fence %s:%s:%llx:%llx released with pending signals!\n",
                 fence->ops->get_driver_name(fence),
                 fence->ops->get_timeline_name(fence),
                 fence->context, fence->seqno)) {
                unsigned long flags;

                /*
                 * Failed to signal before release, likely a refcounting issue.
                 *
                 * This should never happen, but if it does make sure that we
                 * don't leave chains dangling. We set the error flag first
                 * so that the callbacks know this signal is due to an error.
                 */
                spin_lock_irqsave(fence->lock, flags);
                fence->error = -EDEADLK;
                dma_fence_signal_locked(fence);
                spin_unlock_irqrestore(fence->lock, flags);
        }

        if (fence->ops->release)
                fence->ops->release(fence);
        else
                dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - default release function for &dma_fence.
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
 */
void dma_fence_free(struct dma_fence *fence)
{
        kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);

static bool __dma_fence_enable_signaling(struct dma_fence *fence)
{
        bool was_set;

        lockdep_assert_held(fence->lock);

        was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
                                   &fence->flags);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return false;

        if (!was_set && fence->ops->enable_signaling) {
                trace_dma_fence_enable_signal(fence);

                if (!fence->ops->enable_signaling(fence)) {
                        dma_fence_signal_locked(fence);
                        return false;
                }
        }

        return true;
}

/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This will request for sw signaling to be enabled, to make the fence
 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 * internally.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
        unsigned long flags;

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return;

        spin_lock_irqsave(fence->lock, flags);
        __dma_fence_enable_signaling(fence);
        spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);

/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * @cb will be initialized by dma_fence_add_callback(), no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from an atomic context. If
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Add a software callback to the fence. Same restrictions apply to
 * refcount as it does to dma_fence_wait(), however the caller doesn't need to
 * keep a refcount to the fence after dma_fence_add_callback() has returned:
 * when software access is enabled, the creator of the fence is required to keep
 * the fence alive until after it signals with dma_fence_signal(). The callback
 * itself can be called from irq context.
 *
 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 * and -EINVAL in case of error.
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
                           dma_fence_func_t func)
{
        unsigned long flags;
        int ret = 0;

        if (WARN_ON(!fence || !func))
                return -EINVAL;

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                INIT_LIST_HEAD(&cb->node);
                return -ENOENT;
        }

        spin_lock_irqsave(fence->lock, flags);

        if (__dma_fence_enable_signaling(fence)) {
                cb->func = func;
                list_add_tail(&cb->node, &fence->cb_list);
        } else {
                INIT_LIST_HEAD(&cb->node);
                ret = -ENOENT;
        }

        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);
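
/*
 * Callbacks are typically embedded in a larger structure so the handler can
 * recover its context via container_of(); a hedged sketch (my_waiter and
 * my_fence_cb are made up)::
 *
 *     struct my_waiter {
 *             struct dma_fence_cb cb;
 *             struct completion done;
 *     };
 *
 *     static void my_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
 *     {
 *             struct my_waiter *w = container_of(cb, struct my_waiter, cb);
 *
 *             complete(&w->done);     // may run in irq context
 *     }
 *
 *     ...
 *     if (dma_fence_add_callback(fence, &w->cb, my_fence_cb))
 *             complete(&w->done);     // -ENOENT: already signaled, no call
 */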

/**
 * dma_fence_get_status - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * This wraps dma_fence_get_status_locked() to return the error status
 * condition on a signaled fence. See dma_fence_get_status_locked() for more
 * details.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence completed with an error.
 */
int dma_fence_get_status(struct dma_fence *fence)
{
        unsigned long flags;
        int status;

        spin_lock_irqsave(fence->lock, flags);
        status = dma_fence_get_status_locked(fence);
        spin_unlock_irqrestore(fence->lock, flags);

        return status;
}
EXPORT_SYMBOL(dma_fence_get_status);
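
/*
 * Return value sketch::
 *
 *     int s = dma_fence_get_status(fence);
 *
 *     if (s == 0)
 *             ...     // not yet signaled
 *     else if (s == 1)
 *             ...     // signaled, no error
 *     else
 *             ...     // negative: signaled with an error code
 */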

/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: the fence to wait on
 * @cb: the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 *
 * Behaviour is undefined if @cb has not been added to @fence using
 * dma_fence_add_callback() beforehand.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(fence->lock, flags);

        ret = !list_empty(&cb->node);
        if (ret)
                list_del_init(&cb->node);

        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);

struct default_wait_cb {
        struct dma_fence_cb base;
        struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct default_wait_cb *wait =
                container_of(cb, struct default_wait_cb, base);

        wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If @timeout is zero, one is
 * returned when the fence is already signaled, for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
        struct default_wait_cb cb;
        unsigned long flags;
        signed long ret = timeout ? timeout : 1;

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return ret;

        spin_lock_irqsave(fence->lock, flags);

        if (intr && signal_pending(current)) {
                ret = -ERESTARTSYS;
                goto out;
        }

        if (!__dma_fence_enable_signaling(fence))
                goto out;

        if (!timeout) {
                ret = 0;
                goto out;
        }

        cb.base.func = dma_fence_default_wait_cb;
        cb.task = current;
        list_add(&cb.base.node, &fence->cb_list);

        while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
                if (intr)
                        __set_current_state(TASK_INTERRUPTIBLE);
                else
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irqrestore(fence->lock, flags);

                ret = schedule_timeout(ret);

                spin_lock_irqsave(fence->lock, flags);
                if (ret > 0 && intr && signal_pending(current))
                        ret = -ERESTARTSYS;
        }

        if (!list_empty(&cb.base.node))
                list_del(&cb.base.node);
        __set_current_state(TASK_RUNNING);

out:
        spin_unlock_irqrestore(fence->lock, flags);
        return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);

static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
                            uint32_t *idx)
{
        int i;

        for (i = 0; i < count; ++i) {
                struct dma_fence *fence = fences[i];

                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                        if (idx)
                                *idx = i;
                        return true;
                }
        }
        return false;
}

/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: used to store the first signaled fence index, meaningful only on
 *       positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_timeout().
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
                           bool intr, signed long timeout, uint32_t *idx)
{
        struct default_wait_cb *cb;
        signed long ret = timeout;
        unsigned i;

        if (WARN_ON(!fences || !count || timeout < 0))
                return -EINVAL;

        if (timeout == 0) {
                for (i = 0; i < count; ++i)
                        if (dma_fence_is_signaled(fences[i])) {
                                if (idx)
                                        *idx = i;
                                return 1;
                        }

                return 0;
        }

        cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
        if (cb == NULL) {
                ret = -ENOMEM;
                goto err_free_cb;
        }

        for (i = 0; i < count; ++i) {
                struct dma_fence *fence = fences[i];

                cb[i].task = current;
                if (dma_fence_add_callback(fence, &cb[i].base,
                                           dma_fence_default_wait_cb)) {
                        /* This fence is already signaled */
                        if (idx)
                                *idx = i;
                        goto fence_rm_cb;
                }
        }

        while (ret > 0) {
                if (intr)
                        set_current_state(TASK_INTERRUPTIBLE);
                else
                        set_current_state(TASK_UNINTERRUPTIBLE);

                if (dma_fence_test_signaled_any(fences, count, idx))
                        break;

                ret = schedule_timeout(ret);

                if (ret > 0 && intr && signal_pending(current))
                        ret = -ERESTARTSYS;
        }

        __set_current_state(TASK_RUNNING);

fence_rm_cb:
        while (i-- > 0)
                dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
        kfree(cb);

        return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
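
/*
 * Usage sketch: wait up to a second for whichever fence signals first::
 *
 *     uint32_t first;
 *     signed long ret;
 *
 *     ret = dma_fence_wait_any_timeout(fences, count, true,
 *                                      msecs_to_jiffies(1000), &first);
 *     if (ret > 0)
 *             ...     // fences[first] signaled, ret jiffies remained
 *     else if (ret == 0)
 *             ...     // timed out
 */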

/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linearly increasing sequence number for this context
 *
 * Initializes an allocated fence, the caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, making it
 * possible to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
               spinlock_t *lock, u64 context, u64 seqno)
{
        BUG_ON(!lock);
        BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

        kref_init(&fence->refcount);
        fence->ops = ops;
        INIT_LIST_HEAD(&fence->cb_list);
        fence->lock = lock;
        fence->context = context;
        fence->seqno = seqno;
        fence->flags = 0UL;
        fence->error = 0;

        trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);
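
/*
 * A final initialization sketch: fences are usually embedded in a
 * driver-private structure (my_fence, my_fence_ops and engine are all
 * hypothetical)::
 *
 *     struct my_fence {
 *             struct dma_fence base;
 *             struct my_engine *engine;
 *     };
 *
 *     f = kzalloc(sizeof(*f), GFP_KERNEL);
 *     if (!f)
 *             return ERR_PTR(-ENOMEM);
 *
 *     f->engine = engine;
 *     dma_fence_init(&f->base, &my_fence_ops, &engine->fence_lock,
 *                    engine->fence_context, ++engine->next_seqno);
 *
 * Embedding works because dma_fence_release() calls &dma_fence_ops.release,
 * which lets the driver free the containing structure once the last
 * reference is dropped.
 */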