v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Fence mechanism for dma-buf and to allow for asynchronous dma access
   4 *
   5 * Copyright (C) 2012 Canonical Ltd
   6 * Copyright (C) 2012 Texas Instruments
   7 *
   8 * Authors:
   9 * Rob Clark <robdclark@gmail.com>
  10 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
  11 */
  12
  13#include <linux/slab.h>
  14#include <linux/export.h>
  15#include <linux/atomic.h>
  16#include <linux/dma-fence.h>
  17#include <linux/sched/signal.h>
  18#include <linux/seq_file.h>
  19
  20#define CREATE_TRACE_POINTS
  21#include <trace/events/dma_fence.h>
  22
  23EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
  24EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
  25EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
  26
  27static DEFINE_SPINLOCK(dma_fence_stub_lock);
  28static struct dma_fence dma_fence_stub;
  29
  30/*
  31 * fence context counter: each execution context should have its own
  32 * fence context, this allows checking if fences belong to the same
  33 * context or not. One device can have multiple separate contexts,
  34 * and they're used if some engine can run independently of another.
  35 */
  36static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
  37
  38/**
  39 * DOC: DMA fences overview
  40 *
  41 * DMA fences, represented by &struct dma_fence, are the kernel internal
  42 * synchronization primitive for DMA operations like GPU rendering, video
  43 * encoding/decoding, or displaying buffers on a screen.
  44 *
  45 * A fence is initialized using dma_fence_init() and completed using
  46 * dma_fence_signal(). Fences are associated with a context, allocated through
  47 * dma_fence_context_alloc(), and all fences on the same context are
  48 * fully ordered.
  49 *
   50 * Since the purpose of fences is to facilitate cross-device and
   51 * cross-application synchronization, there are multiple ways to use one:
  52 *
  53 * - Individual fences can be exposed as a &sync_file, accessed as a file
  54 *   descriptor from userspace, created by calling sync_file_create(). This is
  55 *   called explicit fencing, since userspace passes around explicit
  56 *   synchronization points.
  57 *
  58 * - Some subsystems also have their own explicit fencing primitives, like
  59 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
  60 *   fence to be updated.
  61 *
  62 * - Then there's also implicit fencing, where the synchronization points are
  63 *   implicitly passed around as part of shared &dma_buf instances. Such
  64 *   implicit fences are stored in &struct dma_resv through the
  65 *   &dma_buf.resv pointer.
  66 */
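
As an illustration of the explicit fencing path described above, here is a minimal sketch — not part of this file, with a hypothetical helper name and trimmed error handling — of how a driver-created fence could be handed to userspace as a sync_file file descriptor:

#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>

/* Hypothetical helper: wrap an existing fence in a sync_file fd. */
static int example_fence_to_fd(struct dma_fence *fence)
{
	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	/* sync_file_create() takes its own reference on @fence. */
	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, sync_file->file);
	return fd;
}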
  67
  68/**
  69 * DOC: fence cross-driver contract
  70 *
   71 * Since &dma_fence provides a cross-driver contract, all drivers must follow the
  72 * same rules:
  73 *
  74 * * Fences must complete in a reasonable time. Fences which represent kernels
  75 *   and shaders submitted by userspace, which could run forever, must be backed
  76 *   up by timeout and gpu hang recovery code. Minimally that code must prevent
  77 *   further command submission and force complete all in-flight fences, e.g.
   78 *   when the driver or hardware does not support gpu reset, or if the gpu reset
  79 *   failed for some reason. Ideally the driver supports gpu recovery which only
  80 *   affects the offending userspace context, and no other userspace
  81 *   submissions.
  82 *
  83 * * Drivers may have different ideas of what completion within a reasonable
  84 *   time means. Some hang recovery code uses a fixed timeout, others a mix
  85 *   between observing forward progress and increasingly strict timeouts.
  86 *   Drivers should not try to second guess timeout handling of fences from
  87 *   other drivers.
  88 *
   89 * * To ensure there are no deadlocks of dma_fence_wait() against other locks,
  90 *   drivers should annotate all code required to reach dma_fence_signal(),
  91 *   which completes the fences, with dma_fence_begin_signalling() and
  92 *   dma_fence_end_signalling().
  93 *
  94 * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock().
  95 *   This means any code required for fence completion cannot acquire a
  96 *   &dma_resv lock. Note that this also pulls in the entire established
  97 *   locking hierarchy around dma_resv_lock() and dma_resv_unlock().
  98 *
  99 * * Drivers are allowed to call dma_fence_wait() from their &shrinker
 100 *   callbacks. This means any code required for fence completion cannot
 101 *   allocate memory with GFP_KERNEL.
 102 *
 103 * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
  104 *   or &mmu_interval_notifier callbacks. This means any code required
  105 *   for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
 106 *   Only GFP_ATOMIC is permissible, which might fail.
 107 *
 108 * Note that only GPU drivers have a reasonable excuse for both requiring
 109 * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
 110 * track asynchronous compute work using &dma_fence. No driver outside of
 111 * drivers/gpu should ever call dma_fence_wait() in such contexts.
 112 */
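
To make the allocation rules above concrete, here is a sketch — all "example_*" names are purely hypothetical — of a job-completion worker: because it sits on the path to dma_fence_signal(), it must not allocate with GFP_KERNEL, GFP_NOFS or GFP_NOIO:

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical driver job; all names are illustrative only. */
struct example_job {
	struct work_struct work;
	struct dma_fence *fence;
};

static void example_job_done_worker(struct work_struct *work)
{
	struct example_job *job = container_of(work, struct example_job, work);

	/*
	 * This worker completes a fence, so per the contract above any
	 * allocation here would have to use GFP_ATOMIC and cope with
	 * failure; GFP_KERNEL could deadlock against a shrinker that
	 * waits on this very fence.
	 */
	dma_fence_signal(job->fence);
	dma_fence_put(job->fence);
	kfree(job);
}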
 113
 114static const char *dma_fence_stub_get_name(struct dma_fence *fence)
 115{
  116	return "stub";
 117}
 118
 119static const struct dma_fence_ops dma_fence_stub_ops = {
 120	.get_driver_name = dma_fence_stub_get_name,
 121	.get_timeline_name = dma_fence_stub_get_name,
 122};
 123
 124/**
 125 * dma_fence_get_stub - return a signaled fence
 126 *
 127 * Return a stub fence which is already signaled. The fence's
 128 * timestamp corresponds to the first time after boot this
 129 * function is called.
 130 */
 131struct dma_fence *dma_fence_get_stub(void)
 132{
 133	spin_lock(&dma_fence_stub_lock);
 134	if (!dma_fence_stub.ops) {
 135		dma_fence_init(&dma_fence_stub,
 136			       &dma_fence_stub_ops,
 137			       &dma_fence_stub_lock,
 138			       0, 0);
 139
 140		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 141			&dma_fence_stub.flags);
 142
 143		dma_fence_signal_locked(&dma_fence_stub);
 144	}
 145	spin_unlock(&dma_fence_stub_lock);
 146
 147	return dma_fence_get(&dma_fence_stub);
 148}
 149EXPORT_SYMBOL(dma_fence_get_stub);
 150
 151/**
 152 * dma_fence_allocate_private_stub - return a private, signaled fence
 153 * @timestamp: timestamp when the fence was signaled
 154 *
 155 * Return a newly allocated and signaled stub fence.
 156 */
 157struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
 158{
 159	struct dma_fence *fence;
 160
 161	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 162	if (fence == NULL)
 163		return NULL;
 164
 165	dma_fence_init(fence,
 166		       &dma_fence_stub_ops,
 167		       &dma_fence_stub_lock,
 168		       0, 0);
 169
 170	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 171		&fence->flags);
 172
 173	dma_fence_signal_timestamp(fence, timestamp);
 174
 175	return fence;
 176}
 177EXPORT_SYMBOL(dma_fence_allocate_private_stub);
 178
 179/**
 180 * dma_fence_context_alloc - allocate an array of fence contexts
 181 * @num: amount of contexts to allocate
 182 *
  183 * This function will return the first index of the @num fence contexts
 184 * allocated.  The fence context is used for setting &dma_fence.context to a
 185 * unique number by passing the context to dma_fence_init().
 186 */
 187u64 dma_fence_context_alloc(unsigned num)
 188{
 189	WARN_ON(!num);
 190	return atomic64_fetch_add(num, &dma_fence_context_counter);
 191}
 192EXPORT_SYMBOL(dma_fence_context_alloc);
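
A usage sketch with hypothetical names: a device allocates one context per engine up front, then stamps each new fence with its engine's context and a monotonically increasing seqno (example_fence_ops is defined in the sketch after dma_fence_init() at the end of this file):

#define EXAMPLE_NUM_ENGINES 4

struct example_device {
	u64 fence_context;	/* first of EXAMPLE_NUM_ENGINES contexts */
	u64 next_seqno[EXAMPLE_NUM_ENGINES];
	spinlock_t fence_lock;
};

static void example_device_init_fencing(struct example_device *edev)
{
	edev->fence_context = dma_fence_context_alloc(EXAMPLE_NUM_ENGINES);
	spin_lock_init(&edev->fence_lock);
}

/* Engine @ring uses context edev->fence_context + ring. */
static void example_fence_init_on_ring(struct example_device *edev,
				       struct dma_fence *fence,
				       unsigned int ring)
{
	dma_fence_init(fence, &example_fence_ops, &edev->fence_lock,
		       edev->fence_context + ring,
		       ++edev->next_seqno[ring]);
}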
 193
 194/**
 195 * DOC: fence signalling annotation
 196 *
 197 * Proving correctness of all the kernel code around &dma_fence through code
 198 * review and testing is tricky for a few reasons:
 199 *
 200 * * It is a cross-driver contract, and therefore all drivers must follow the
 201 *   same rules for lock nesting order, calling contexts for various functions
 202 *   and anything else significant for in-kernel interfaces. But it is also
 203 *   impossible to test all drivers in a single machine, hence brute-force N vs.
 204 *   N testing of all combinations is impossible. Even just limiting to the
 205 *   possible combinations is infeasible.
 206 *
 207 * * There is an enormous amount of driver code involved. For render drivers
 208 *   there's the tail of command submission, after fences are published,
 209 *   scheduler code, interrupt and workers to process job completion,
 210 *   and timeout, gpu reset and gpu hang recovery code. Plus for integration
  211 *   with core mm we have &mmu_notifier and &mmu_interval_notifier,
 212 *   and &shrinker. For modesetting drivers there's the commit tail functions
 213 *   between when fences for an atomic modeset are published, and when the
 214 *   corresponding vblank completes, including any interrupt processing and
 215 *   related workers. Auditing all that code, across all drivers, is not
 216 *   feasible.
 217 *
 218 * * Due to how many other subsystems are involved and the locking hierarchies
 219 *   this pulls in there is extremely thin wiggle-room for driver-specific
 220 *   differences. &dma_fence interacts with almost all of the core memory
 221 *   handling through page fault handlers via &dma_resv, dma_resv_lock() and
  222 *   dma_resv_unlock(). On the other side it also interacts with all
  223 *   allocation sites through &mmu_notifier and &shrinker.
 224 *
 225 * Furthermore lockdep does not handle cross-release dependencies, which means
 226 * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
 227 * at runtime with some quick testing. The simplest example is one thread
 228 * waiting on a &dma_fence while holding a lock::
 229 *
 230 *     lock(A);
 231 *     dma_fence_wait(B);
 232 *     unlock(A);
 233 *
 234 * while the other thread is stuck trying to acquire the same lock, which
 235 * prevents it from signalling the fence the previous thread is stuck waiting
 236 * on::
 237 *
 238 *     lock(A);
 239 *     unlock(A);
 240 *     dma_fence_signal(B);
 241 *
 242 * By manually annotating all code relevant to signalling a &dma_fence we can
 243 * teach lockdep about these dependencies, which also helps with the validation
 244 * headache since now lockdep can check all the rules for us::
 245 *
 246 *    cookie = dma_fence_begin_signalling();
 247 *    lock(A);
 248 *    unlock(A);
 249 *    dma_fence_signal(B);
 250 *    dma_fence_end_signalling(cookie);
 251 *
  252 * When using dma_fence_begin_signalling() and dma_fence_end_signalling() to
 253 * annotate critical sections the following rules need to be observed:
 254 *
 255 * * All code necessary to complete a &dma_fence must be annotated, from the
 256 *   point where a fence is accessible to other threads, to the point where
 257 *   dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
 258 *   and due to the very strict rules and many corner cases it is infeasible to
 259 *   catch these just with review or normal stress testing.
 260 *
 261 * * &struct dma_resv deserves a special note, since the readers are only
 262 *   protected by rcu. This means the signalling critical section starts as soon
 263 *   as the new fences are installed, even before dma_resv_unlock() is called.
 264 *
  265 * * The only exceptions are fast paths and opportunistic signalling code, which
  266 *   call dma_fence_signal() purely as an optimization, but are not required to
 267 *   guarantee completion of a &dma_fence. The usual example is a wait IOCTL
 268 *   which calls dma_fence_signal(), while the mandatory completion path goes
 269 *   through a hardware interrupt and possible job completion worker.
 270 *
 271 * * To aid composability of code, the annotations can be freely nested, as long
 272 *   as the overall locking hierarchy is consistent. The annotations also work
 273 *   both in interrupt and process context. Due to implementation details this
 274 *   requires that callers pass an opaque cookie from
 275 *   dma_fence_begin_signalling() to dma_fence_end_signalling().
 276 *
 277 * * Validation against the cross driver contract is implemented by priming
 278 *   lockdep with the relevant hierarchy at boot-up. This means even just
 279 *   testing with a single device is enough to validate a driver, at least as
 280 *   far as deadlocks with dma_fence_wait() against dma_fence_signal() are
 281 *   concerned.
 282 */
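
As a worked example of these annotations, a sketch of a hypothetical timeout handler that force-completes a hung job (reusing the illustrative example_job from the earlier sketch); everything between begin and end is part of the fence-signalling critical section:

/* Hypothetical timeout handler: force-completes a hung job. */
static void example_job_timedout(struct example_job *job)
{
	bool cookie = dma_fence_begin_signalling();

	example_stop_ring(job);		/* hypothetical HW teardown */
	dma_fence_set_error(job->fence, -ETIMEDOUT);
	dma_fence_signal(job->fence);

	dma_fence_end_signalling(cookie);
}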
 283#ifdef CONFIG_LOCKDEP
 284static struct lockdep_map dma_fence_lockdep_map = {
 285	.name = "dma_fence_map"
 286};
 287
 288/**
 289 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
 290 *
 291 * Drivers should use this to annotate the beginning of any code section
 292 * required to eventually complete &dma_fence by calling dma_fence_signal().
 293 *
  294 * The end of these critical sections is annotated with
 295 * dma_fence_end_signalling().
 296 *
 297 * Returns:
 298 *
 299 * Opaque cookie needed by the implementation, which needs to be passed to
 300 * dma_fence_end_signalling().
 301 */
 302bool dma_fence_begin_signalling(void)
 303{
 304	/* explicitly nesting ... */
 305	if (lock_is_held_type(&dma_fence_lockdep_map, 1))
 306		return true;
 307
 308	/* rely on might_sleep check for soft/hardirq locks */
 309	if (in_atomic())
 310		return true;
 311
 312	/* ... and non-recursive readlock */
 313	lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);
 314
 315	return false;
 316}
 317EXPORT_SYMBOL(dma_fence_begin_signalling);
 318
 319/**
 320 * dma_fence_end_signalling - end a critical DMA fence signalling section
 321 * @cookie: opaque cookie from dma_fence_begin_signalling()
 322 *
 323 * Closes a critical section annotation opened by dma_fence_begin_signalling().
 324 */
 325void dma_fence_end_signalling(bool cookie)
 326{
 327	if (cookie)
 328		return;
 329
 330	lock_release(&dma_fence_lockdep_map, _RET_IP_);
 331}
 332EXPORT_SYMBOL(dma_fence_end_signalling);
 333
 334void __dma_fence_might_wait(void)
 335{
 336	bool tmp;
 337
 338	tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
 339	if (tmp)
 340		lock_release(&dma_fence_lockdep_map, _THIS_IP_);
 341	lock_map_acquire(&dma_fence_lockdep_map);
 342	lock_map_release(&dma_fence_lockdep_map);
 343	if (tmp)
 344		lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
 345}
 346#endif
 347
 348
 349/**
 350 * dma_fence_signal_timestamp_locked - signal completion of a fence
 351 * @fence: the fence to signal
 352 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 353 *
 354 * Signal completion for software callbacks on a fence, this will unblock
 355 * dma_fence_wait() calls and run all the callbacks added with
 356 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 357 * can only go from the unsignaled to the signaled state and not back, it will
 358 * only be effective the first time. Set the timestamp provided as the fence
 359 * signal timestamp.
 360 *
 361 * Unlike dma_fence_signal_timestamp(), this function must be called with
 362 * &dma_fence.lock held.
 363 *
 364 * Returns 0 on success and a negative error value when @fence has been
 365 * signalled already.
 366 */
 367int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
 368				      ktime_t timestamp)
 369{
 370	struct dma_fence_cb *cur, *tmp;
 371	struct list_head cb_list;
 372
 373	lockdep_assert_held(fence->lock);
 374
 375	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
 376				      &fence->flags)))
 377		return -EINVAL;
 378
 379	/* Stash the cb_list before replacing it with the timestamp */
 380	list_replace(&fence->cb_list, &cb_list);
 381
 382	fence->timestamp = timestamp;
 383	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 384	trace_dma_fence_signaled(fence);
 385
 386	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
 387		INIT_LIST_HEAD(&cur->node);
 388		cur->func(fence, cur);
 389	}
 390
 391	return 0;
 392}
 393EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
 394
 395/**
 396 * dma_fence_signal_timestamp - signal completion of a fence
 397 * @fence: the fence to signal
 398 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 399 *
 400 * Signal completion for software callbacks on a fence, this will unblock
 401 * dma_fence_wait() calls and run all the callbacks added with
 402 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 403 * can only go from the unsignaled to the signaled state and not back, it will
 404 * only be effective the first time. Set the timestamp provided as the fence
 405 * signal timestamp.
 406 *
 407 * Returns 0 on success and a negative error value when @fence has been
 408 * signalled already.
 409 */
 410int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
 411{
 412	unsigned long flags;
 413	int ret;
 414
 415	if (!fence)
 416		return -EINVAL;
 417
 418	spin_lock_irqsave(fence->lock, flags);
 419	ret = dma_fence_signal_timestamp_locked(fence, timestamp);
 420	spin_unlock_irqrestore(fence->lock, flags);
 421
 422	return ret;
 423}
 424EXPORT_SYMBOL(dma_fence_signal_timestamp);
 425
 426/**
 427 * dma_fence_signal_locked - signal completion of a fence
 428 * @fence: the fence to signal
 429 *
 430 * Signal completion for software callbacks on a fence, this will unblock
 431 * dma_fence_wait() calls and run all the callbacks added with
 432 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 433 * can only go from the unsignaled to the signaled state and not back, it will
 434 * only be effective the first time.
 435 *
 436 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
 437 * held.
 438 *
 439 * Returns 0 on success and a negative error value when @fence has been
 440 * signalled already.
 441 */
 442int dma_fence_signal_locked(struct dma_fence *fence)
 443{
 444	return dma_fence_signal_timestamp_locked(fence, ktime_get());
 445}
 446EXPORT_SYMBOL(dma_fence_signal_locked);
 447
 448/**
 449 * dma_fence_signal - signal completion of a fence
 450 * @fence: the fence to signal
 451 *
 452 * Signal completion for software callbacks on a fence, this will unblock
 453 * dma_fence_wait() calls and run all the callbacks added with
 454 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 455 * can only go from the unsignaled to the signaled state and not back, it will
 456 * only be effective the first time.
 457 *
 458 * Returns 0 on success and a negative error value when @fence has been
 459 * signalled already.
 460 */
 461int dma_fence_signal(struct dma_fence *fence)
 462{
 463	unsigned long flags;
 464	int ret;
 465	bool tmp;
 466
 467	if (!fence)
 468		return -EINVAL;
 469
 470	tmp = dma_fence_begin_signalling();
 471
 472	spin_lock_irqsave(fence->lock, flags);
 473	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
 474	spin_unlock_irqrestore(fence->lock, flags);
 475
 476	dma_fence_end_signalling(tmp);
 477
 478	return ret;
 479}
 480EXPORT_SYMBOL(dma_fence_signal);
 481
 482/**
 483 * dma_fence_wait_timeout - sleep until the fence gets signaled
 484 * or until timeout elapses
 485 * @fence: the fence to wait on
 486 * @intr: if true, do an interruptible wait
 487 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 488 *
 489 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 490 * remaining timeout in jiffies on success. Other error values may be
 491 * returned on custom implementations.
 492 *
 493 * Performs a synchronous wait on this fence. It is assumed the caller
 494 * directly or indirectly (buf-mgr between reservation and committing)
 495 * holds a reference to the fence, otherwise the fence might be
 496 * freed before return, resulting in undefined behavior.
 497 *
 498 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
 499 */
 500signed long
 501dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
 502{
 503	signed long ret;
 504
 505	if (WARN_ON(timeout < 0))
 506		return -EINVAL;
 507
 508	might_sleep();
 509
 510	__dma_fence_might_wait();
 511
 512	dma_fence_enable_sw_signaling(fence);
 513
 514	trace_dma_fence_wait_start(fence);
 515	if (fence->ops->wait)
 516		ret = fence->ops->wait(fence, intr, timeout);
 517	else
 518		ret = dma_fence_default_wait(fence, intr, timeout);
 519	trace_dma_fence_wait_end(fence);
 520	return ret;
 521}
 522EXPORT_SYMBOL(dma_fence_wait_timeout);
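
A sketch of typical caller-side handling of the three classes of return value (the helper name is hypothetical):

static int example_wait_for_job(struct dma_fence *fence)
{
	signed long ret;

	/* Interruptible wait, at most 100ms. */
	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
	if (ret < 0)		/* -ERESTARTSYS or a driver-specific error */
		return ret;
	if (ret == 0)		/* timed out */
		return -ETIMEDOUT;
	return 0;		/* signaled; ret was the remaining jiffies */
}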
 523
 524/**
  525 * dma_fence_release - default release function for fences
  526 * @kref: &dma_fence.refcount
  527 *
  528 * This is the default release function for &dma_fence. Drivers shouldn't call
 529 * this directly, but instead call dma_fence_put().
 530 */
 531void dma_fence_release(struct kref *kref)
 532{
 533	struct dma_fence *fence =
 534		container_of(kref, struct dma_fence, refcount);
 535
 536	trace_dma_fence_destroy(fence);
 537
 538	if (WARN(!list_empty(&fence->cb_list) &&
 539		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
 540		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
 541		 fence->ops->get_driver_name(fence),
 542		 fence->ops->get_timeline_name(fence),
 543		 fence->context, fence->seqno)) {
 544		unsigned long flags;
 545
 546		/*
 547		 * Failed to signal before release, likely a refcounting issue.
 548		 *
 549		 * This should never happen, but if it does make sure that we
 550		 * don't leave chains dangling. We set the error flag first
 551		 * so that the callbacks know this signal is due to an error.
 552		 */
 553		spin_lock_irqsave(fence->lock, flags);
 554		fence->error = -EDEADLK;
 555		dma_fence_signal_locked(fence);
 556		spin_unlock_irqrestore(fence->lock, flags);
 557	}
 558
 559	if (fence->ops->release)
 560		fence->ops->release(fence);
 561	else
 562		dma_fence_free(fence);
 563}
 564EXPORT_SYMBOL(dma_fence_release);
 565
 566/**
 567 * dma_fence_free - default release function for &dma_fence.
 568 * @fence: fence to release
 569 *
 570 * This is the default implementation for &dma_fence_ops.release. It calls
 571 * kfree_rcu() on @fence.
 572 */
 573void dma_fence_free(struct dma_fence *fence)
 574{
 575	kfree_rcu(fence, rcu);
 576}
 577EXPORT_SYMBOL(dma_fence_free);
 578
 579static bool __dma_fence_enable_signaling(struct dma_fence *fence)
 580{
 581	bool was_set;
 582
 583	lockdep_assert_held(fence->lock);
 584
 585	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 586				   &fence->flags);
 587
 588	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 589		return false;
 590
 591	if (!was_set && fence->ops->enable_signaling) {
 592		trace_dma_fence_enable_signal(fence);
 593
 594		if (!fence->ops->enable_signaling(fence)) {
 595			dma_fence_signal_locked(fence);
 596			return false;
 597		}
 598	}
 599
 600	return true;
 601}
 602
 603/**
 604 * dma_fence_enable_sw_signaling - enable signaling on fence
 605 * @fence: the fence to enable
 606 *
  607 * This requests sw signaling to be enabled, to make the fence
 608 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 609 * internally.
 610 */
 611void dma_fence_enable_sw_signaling(struct dma_fence *fence)
 612{
 613	unsigned long flags;
 614
 615	spin_lock_irqsave(fence->lock, flags);
 616	__dma_fence_enable_signaling(fence);
 617	spin_unlock_irqrestore(fence->lock, flags);
 618}
 619EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
 620
 621/**
 622 * dma_fence_add_callback - add a callback to be called when the fence
 623 * is signaled
 624 * @fence: the fence to wait on
 625 * @cb: the callback to register
 626 * @func: the function to call
 627 *
 628 * Add a software callback to the fence. The caller should keep a reference to
 629 * the fence.
 630 *
 631 * @cb will be initialized by dma_fence_add_callback(), no initialization
 632 * by the caller is required. Any number of callbacks can be registered
 633 * to a fence, but a callback can only be registered to one fence at a time.
 634 *
 635 * If fence is already signaled, this function will return -ENOENT (and
 636 * *not* call the callback).
 637 *
 638 * Note that the callback can be called from an atomic context or irq context.
 639 *
 640 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 641 * and -EINVAL in case of error.
 642 */
 643int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
 644			   dma_fence_func_t func)
 645{
 646	unsigned long flags;
 647	int ret = 0;
 648
 649	if (WARN_ON(!fence || !func))
 650		return -EINVAL;
 651
 652	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 653		INIT_LIST_HEAD(&cb->node);
 654		return -ENOENT;
 655	}
 656
 657	spin_lock_irqsave(fence->lock, flags);
 658
 659	if (__dma_fence_enable_signaling(fence)) {
 660		cb->func = func;
 661		list_add_tail(&cb->node, &fence->cb_list);
 662	} else {
 663		INIT_LIST_HEAD(&cb->node);
 664		ret = -ENOENT;
 665	}
 666
 667	spin_unlock_irqrestore(fence->lock, flags);
 668
 669	return ret;
 670}
 671EXPORT_SYMBOL(dma_fence_add_callback);
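
A sketch of the usual embedding pattern, with hypothetical names: the &dma_fence_cb is placed inside a larger structure and recovered with container_of() in the callback, which may run in IRQ context:

#include <linux/completion.h>

struct example_waiter {
	struct dma_fence_cb cb;
	struct completion done;
};

static void example_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct example_waiter *w = container_of(cb, struct example_waiter, cb);

	/* May be called in IRQ context: keep it short, never sleep. */
	complete(&w->done);
}

static void example_wait_via_callback(struct dma_fence *fence)
{
	struct example_waiter w;

	init_completion(&w.done);
	if (dma_fence_add_callback(fence, &w.cb, example_fence_cb))
		return;		/* -ENOENT: already signaled, cb not added */
	wait_for_completion(&w.done);
}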
 672
 673/**
 674 * dma_fence_get_status - returns the status upon completion
 675 * @fence: the dma_fence to query
 676 *
 677 * This wraps dma_fence_get_status_locked() to return the error status
 678 * condition on a signaled fence. See dma_fence_get_status_locked() for more
 679 * details.
 680 *
 681 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 682 * been signaled without an error condition, or a negative error code
  683 * if the fence has been completed in error.
 684 */
 685int dma_fence_get_status(struct dma_fence *fence)
 686{
 687	unsigned long flags;
 688	int status;
 689
 690	spin_lock_irqsave(fence->lock, flags);
 691	status = dma_fence_get_status_locked(fence);
 692	spin_unlock_irqrestore(fence->lock, flags);
 693
 694	return status;
 695}
 696EXPORT_SYMBOL(dma_fence_get_status);
 697
 698/**
 699 * dma_fence_remove_callback - remove a callback from the signaling list
 700 * @fence: the fence to wait on
 701 * @cb: the callback to remove
 702 *
 703 * Remove a previously queued callback from the fence. This function returns
 704 * true if the callback is successfully removed, or false if the fence has
 705 * already been signaled.
 706 *
 707 * *WARNING*:
 708 * Cancelling a callback should only be done if you really know what you're
 709 * doing, since deadlocks and race conditions could occur all too easily. For
 710 * this reason, it should only ever be done on hardware lockup recovery,
 711 * with a reference held to the fence.
 712 *
 713 * Behaviour is undefined if @cb has not been added to @fence using
 714 * dma_fence_add_callback() beforehand.
 715 */
 716bool
 717dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
 718{
 719	unsigned long flags;
 720	bool ret;
 721
 722	spin_lock_irqsave(fence->lock, flags);
 723
 724	ret = !list_empty(&cb->node);
 725	if (ret)
 726		list_del_init(&cb->node);
 727
 728	spin_unlock_irqrestore(fence->lock, flags);
 729
 730	return ret;
 731}
 732EXPORT_SYMBOL(dma_fence_remove_callback);
 733
 734struct default_wait_cb {
 735	struct dma_fence_cb base;
 736	struct task_struct *task;
 737};
 738
 739static void
 740dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 741{
 742	struct default_wait_cb *wait =
 743		container_of(cb, struct default_wait_cb, base);
 744
 745	wake_up_state(wait->task, TASK_NORMAL);
 746}
 747
 748/**
 749 * dma_fence_default_wait - default sleep until the fence gets signaled
 750 * or until timeout elapses
 751 * @fence: the fence to wait on
 752 * @intr: if true, do an interruptible wait
 753 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 754 *
 755 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
  756 * remaining timeout in jiffies on success. If timeout is zero, the value one is
  757 * returned if the fence is already signaled, for consistency with other
 758 * functions taking a jiffies timeout.
 759 */
 760signed long
 761dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
 762{
 763	struct default_wait_cb cb;
 764	unsigned long flags;
 765	signed long ret = timeout ? timeout : 1;
 766
 767	spin_lock_irqsave(fence->lock, flags);
 768
 769	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 770		goto out;
 771
 772	if (intr && signal_pending(current)) {
 773		ret = -ERESTARTSYS;
 774		goto out;
 775	}
 776
 777	if (!timeout) {
 778		ret = 0;
 779		goto out;
 780	}
 781
 782	cb.base.func = dma_fence_default_wait_cb;
 783	cb.task = current;
 784	list_add(&cb.base.node, &fence->cb_list);
 785
 786	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
 787		if (intr)
 788			__set_current_state(TASK_INTERRUPTIBLE);
 789		else
 790			__set_current_state(TASK_UNINTERRUPTIBLE);
 791		spin_unlock_irqrestore(fence->lock, flags);
 792
 793		ret = schedule_timeout(ret);
 794
 795		spin_lock_irqsave(fence->lock, flags);
 796		if (ret > 0 && intr && signal_pending(current))
 797			ret = -ERESTARTSYS;
 798	}
 799
 800	if (!list_empty(&cb.base.node))
 801		list_del(&cb.base.node);
 802	__set_current_state(TASK_RUNNING);
 803
 804out:
 805	spin_unlock_irqrestore(fence->lock, flags);
 806	return ret;
 807}
 808EXPORT_SYMBOL(dma_fence_default_wait);
 809
 810static bool
 811dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
 812			    uint32_t *idx)
 813{
 814	int i;
 815
 816	for (i = 0; i < count; ++i) {
 817		struct dma_fence *fence = fences[i];
 818		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 819			if (idx)
 820				*idx = i;
 821			return true;
 822		}
 823	}
 824	return false;
 825}
 826
 827/**
 828 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 829 * or until timeout elapses
 830 * @fences: array of fences to wait on
 831 * @count: number of fences to wait on
 832 * @intr: if true, do an interruptible wait
 833 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 834 * @idx: used to store the first signaled fence index, meaningful only on
 835 *	positive return
 836 *
 837 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 838 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 839 * on success.
 840 *
  841 * Synchronously waits for the first fence in the array to be signaled. The
 842 * caller needs to hold a reference to all fences in the array, otherwise a
 843 * fence might be freed before return, resulting in undefined behavior.
 844 *
 845 * See also dma_fence_wait() and dma_fence_wait_timeout().
 846 */
 847signed long
 848dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
 849			   bool intr, signed long timeout, uint32_t *idx)
 850{
 851	struct default_wait_cb *cb;
 852	signed long ret = timeout;
 853	unsigned i;
 854
 855	if (WARN_ON(!fences || !count || timeout < 0))
 856		return -EINVAL;
 857
 858	if (timeout == 0) {
 859		for (i = 0; i < count; ++i)
 860			if (dma_fence_is_signaled(fences[i])) {
 861				if (idx)
 862					*idx = i;
 863				return 1;
 864			}
 865
 866		return 0;
 867	}
 868
 869	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
 870	if (cb == NULL) {
 871		ret = -ENOMEM;
 872		goto err_free_cb;
 873	}
 874
 875	for (i = 0; i < count; ++i) {
 876		struct dma_fence *fence = fences[i];
 877
 878		cb[i].task = current;
 879		if (dma_fence_add_callback(fence, &cb[i].base,
 880					   dma_fence_default_wait_cb)) {
 881			/* This fence is already signaled */
 882			if (idx)
 883				*idx = i;
 884			goto fence_rm_cb;
 885		}
 886	}
 887
 888	while (ret > 0) {
 889		if (intr)
 890			set_current_state(TASK_INTERRUPTIBLE);
 891		else
 892			set_current_state(TASK_UNINTERRUPTIBLE);
 893
 894		if (dma_fence_test_signaled_any(fences, count, idx))
 895			break;
 896
 897		ret = schedule_timeout(ret);
 898
 899		if (ret > 0 && intr && signal_pending(current))
 900			ret = -ERESTARTSYS;
 901	}
 902
 903	__set_current_state(TASK_RUNNING);
 904
 905fence_rm_cb:
 906	while (i-- > 0)
 907		dma_fence_remove_callback(fences[i], &cb[i].base);
 908
 909err_free_cb:
 910	kfree(cb);
 911
 912	return ret;
 913}
 914EXPORT_SYMBOL(dma_fence_wait_any_timeout);
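
A usage sketch (hypothetical helper) waiting on whichever of two fences signals first, using @idx to learn which one it was:

static int example_wait_first(struct dma_fence *a, struct dma_fence *b)
{
	struct dma_fence *fences[] = { a, b };
	uint32_t idx;
	signed long ret;

	ret = dma_fence_wait_any_timeout(fences, ARRAY_SIZE(fences), true,
					 msecs_to_jiffies(500), &idx);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return -ETIMEDOUT;

	pr_debug("fence %u signaled first\n", idx);
	return 0;
}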
 915
 916/**
 917 * DOC: deadline hints
 918 *
 919 * In an ideal world, it would be possible to pipeline a workload sufficiently
 920 * that a utilization based device frequency governor could arrive at a minimum
 921 * frequency that meets the requirements of the use-case, in order to minimize
 922 * power consumption.  But in the real world there are many workloads which
 923 * defy this ideal.  For example, but not limited to:
 924 *
 925 * * Workloads that ping-pong between device and CPU, with alternating periods
 926 *   of CPU waiting for device, and device waiting on CPU.  This can result in
  927 *   devfreq and cpufreq seeing idle time in their respective domains and,
  928 *   as a result, reducing frequency.
 929 *
 930 * * Workloads that interact with a periodic time based deadline, such as double
 931 *   buffered GPU rendering vs vblank sync'd page flipping.  In this scenario,
 932 *   missing a vblank deadline results in an *increase* in idle time on the GPU
 933 *   (since it has to wait an additional vblank period), sending a signal to
 934 *   the GPU's devfreq to reduce frequency, when in fact the opposite is what is
 935 *   needed.
 936 *
 937 * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline
 938 * (or indirectly via userspace facing ioctls like &sync_set_deadline).
 939 * The deadline hint provides a way for the waiting driver, or userspace, to
 940 * convey an appropriate sense of urgency to the signaling driver.
 941 *
 942 * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace
 943 * facing APIs).  The time could either be some point in the future (such as
 944 * the vblank based deadline for page-flipping, or the start of a compositor's
 945 * composition cycle), or the current time to indicate an immediate deadline
  946 * hint (i.e. forward progress cannot be made until this fence is signaled).
 947 *
 948 * Multiple deadlines may be set on a given fence, even in parallel.  See the
 949 * documentation for &dma_fence_ops.set_deadline.
 950 *
 951 * The deadline hint is just that, a hint.  The driver that created the fence
 952 * may react by increasing frequency, making different scheduling choices, etc.
 953 * Or doing nothing at all.
 954 */
 955
 956/**
 957 * dma_fence_set_deadline - set desired fence-wait deadline hint
 958 * @fence:    the fence that is to be waited on
 959 * @deadline: the time by which the waiter hopes for the fence to be
 960 *            signaled
 961 *
 962 * Give the fence signaler a hint about an upcoming deadline, such as
 963 * vblank, by which point the waiter would prefer the fence to be
  964 * signaled.  This is intended to give feedback to the fence signaler
 965 * to aid in power management decisions, such as boosting GPU frequency
 966 * if a periodic vblank deadline is approaching but the fence is not
  967 * yet signaled.
 968 */
 969void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
 970{
 971	if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
 972		fence->ops->set_deadline(fence, deadline);
 973}
 974EXPORT_SYMBOL(dma_fence_set_deadline);
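
A sketch of how a page-flipping waiter might use this, assuming @next_vblank is the CLOCK_MONOTONIC time of the upcoming vblank; the helper and the 1ms headroom are illustrative:

static void example_flip_set_deadline(struct dma_fence *fence,
				      ktime_t next_vblank)
{
	/* Ask for completion a little before the vblank itself. */
	dma_fence_set_deadline(fence, ktime_sub_ms(next_vblank, 1));
}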
 975
 976/**
  977 * dma_fence_describe - Dump fence description into seq_file
  978 * @fence: the fence to describe
  979 * @seq: the seq_file to put the textual description into
  980 *
  981 * Dump a textual description of the fence and its state into the seq_file.
 982 */
 983void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
 984{
 985	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
 986		   fence->ops->get_driver_name(fence),
 987		   fence->ops->get_timeline_name(fence), fence->seqno,
 988		   dma_fence_is_signaled(fence) ? "" : "un");
 989}
 990EXPORT_SYMBOL(dma_fence_describe);
 991
 992/**
 993 * dma_fence_init - Initialize a custom fence.
 994 * @fence: the fence to initialize
 995 * @ops: the dma_fence_ops for operations on this fence
 996 * @lock: the irqsafe spinlock to use for locking this fence
 997 * @context: the execution context this fence is run on
 998 * @seqno: a linear increasing sequence number for this context
 999 *
  994 * Initializes an allocated fence. The caller doesn't have to keep its
1001 * refcount after committing with this fence, but it will need to hold a
1002 * refcount again if &dma_fence_ops.enable_signaling gets called.
1003 *
 1004 * context and seqno are used for easy comparison between fences, making it
 1005 * possible to check which fence is later by simply using dma_fence_later().
1006 */
1007void
1008dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
1009	       spinlock_t *lock, u64 context, u64 seqno)
1010{
1011	BUG_ON(!lock);
1012	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
1013
1014	kref_init(&fence->refcount);
1015	fence->ops = ops;
1016	INIT_LIST_HEAD(&fence->cb_list);
1017	fence->lock = lock;
1018	fence->context = context;
1019	fence->seqno = seqno;
1020	fence->flags = 0UL;
1021	fence->error = 0;
1022
1023	trace_dma_fence_init(fence);
1024}
1025EXPORT_SYMBOL(dma_fence_init);
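
Tying the pieces together, a minimal, illustrative fence implementation built around dma_fence_init(); all "example_*" names are hypothetical, and only the two mandatory ops are provided:

static const char *example_get_driver_name(struct dma_fence *fence)
{
	return "example";
}

static const char *example_get_timeline_name(struct dma_fence *fence)
{
	return "example-timeline";
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_get_driver_name,
	.get_timeline_name = example_get_timeline_name,
	/* no .enable_signaling: nothing extra is needed to signal */
};

static DEFINE_SPINLOCK(example_fence_lock);

static struct dma_fence *example_fence_create(u64 context, u64 seqno)
{
	struct dma_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return NULL;

	dma_fence_init(fence, &example_fence_ops, &example_fence_lock,
		       context, seqno);
	return fence;	/* one reference held; drop with dma_fence_put() */
}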
v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Fence mechanism for dma-buf and to allow for asynchronous dma access
  4 *
  5 * Copyright (C) 2012 Canonical Ltd
  6 * Copyright (C) 2012 Texas Instruments
  7 *
  8 * Authors:
  9 * Rob Clark <robdclark@gmail.com>
 10 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 11 */
 12
 13#include <linux/slab.h>
 14#include <linux/export.h>
 15#include <linux/atomic.h>
 16#include <linux/dma-fence.h>
 17#include <linux/sched/signal.h>
 18#include <linux/seq_file.h>
 19
 20#define CREATE_TRACE_POINTS
 21#include <trace/events/dma_fence.h>
 22
 23EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 24EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
 25EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
 26
 27static DEFINE_SPINLOCK(dma_fence_stub_lock);
 28static struct dma_fence dma_fence_stub;
 29
 30/*
 31 * fence context counter: each execution context should have its own
 32 * fence context, this allows checking if fences belong to the same
 33 * context or not. One device can have multiple separate contexts,
 34 * and they're used if some engine can run independently of another.
 35 */
 36static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
 37
 38/**
 39 * DOC: DMA fences overview
 40 *
 41 * DMA fences, represented by &struct dma_fence, are the kernel internal
 42 * synchronization primitive for DMA operations like GPU rendering, video
 43 * encoding/decoding, or displaying buffers on a screen.
 44 *
 45 * A fence is initialized using dma_fence_init() and completed using
 46 * dma_fence_signal(). Fences are associated with a context, allocated through
 47 * dma_fence_context_alloc(), and all fences on the same context are
 48 * fully ordered.
 49 *
 50 * Since the purposes of fences is to facilitate cross-device and
 51 * cross-application synchronization, there's multiple ways to use one:
 52 *
 53 * - Individual fences can be exposed as a &sync_file, accessed as a file
 54 *   descriptor from userspace, created by calling sync_file_create(). This is
 55 *   called explicit fencing, since userspace passes around explicit
 56 *   synchronization points.
 57 *
 58 * - Some subsystems also have their own explicit fencing primitives, like
 59 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
 60 *   fence to be updated.
 61 *
 62 * - Then there's also implicit fencing, where the synchronization points are
 63 *   implicitly passed around as part of shared &dma_buf instances. Such
 64 *   implicit fences are stored in &struct dma_resv through the
 65 *   &dma_buf.resv pointer.
 66 */
 67
 68/**
 69 * DOC: fence cross-driver contract
 70 *
 71 * Since &dma_fence provide a cross driver contract, all drivers must follow the
 72 * same rules:
 73 *
 74 * * Fences must complete in a reasonable time. Fences which represent kernels
 75 *   and shaders submitted by userspace, which could run forever, must be backed
 76 *   up by timeout and gpu hang recovery code. Minimally that code must prevent
 77 *   further command submission and force complete all in-flight fences, e.g.
 78 *   when the driver or hardware do not support gpu reset, or if the gpu reset
 79 *   failed for some reason. Ideally the driver supports gpu recovery which only
 80 *   affects the offending userspace context, and no other userspace
 81 *   submissions.
 82 *
 83 * * Drivers may have different ideas of what completion within a reasonable
 84 *   time means. Some hang recovery code uses a fixed timeout, others a mix
 85 *   between observing forward progress and increasingly strict timeouts.
 86 *   Drivers should not try to second guess timeout handling of fences from
 87 *   other drivers.
 88 *
 89 * * To ensure there's no deadlocks of dma_fence_wait() against other locks
 90 *   drivers should annotate all code required to reach dma_fence_signal(),
 91 *   which completes the fences, with dma_fence_begin_signalling() and
 92 *   dma_fence_end_signalling().
 93 *
 94 * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock().
 95 *   This means any code required for fence completion cannot acquire a
 96 *   &dma_resv lock. Note that this also pulls in the entire established
 97 *   locking hierarchy around dma_resv_lock() and dma_resv_unlock().
 98 *
 99 * * Drivers are allowed to call dma_fence_wait() from their &shrinker
100 *   callbacks. This means any code required for fence completion cannot
101 *   allocate memory with GFP_KERNEL.
102 *
103 * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
104 *   respectively &mmu_interval_notifier callbacks. This means any code required
105 *   for fence completeion cannot allocate memory with GFP_NOFS or GFP_NOIO.
106 *   Only GFP_ATOMIC is permissible, which might fail.
107 *
108 * Note that only GPU drivers have a reasonable excuse for both requiring
109 * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
110 * track asynchronous compute work using &dma_fence. No driver outside of
111 * drivers/gpu should ever call dma_fence_wait() in such contexts.
112 */
113
114static const char *dma_fence_stub_get_name(struct dma_fence *fence)
115{
116        return "stub";
117}
118
119static const struct dma_fence_ops dma_fence_stub_ops = {
120	.get_driver_name = dma_fence_stub_get_name,
121	.get_timeline_name = dma_fence_stub_get_name,
122};
123
124/**
125 * dma_fence_get_stub - return a signaled fence
126 *
127 * Return a stub fence which is already signaled. The fence's
128 * timestamp corresponds to the first time after boot this
129 * function is called.
130 */
131struct dma_fence *dma_fence_get_stub(void)
132{
133	spin_lock(&dma_fence_stub_lock);
134	if (!dma_fence_stub.ops) {
135		dma_fence_init(&dma_fence_stub,
136			       &dma_fence_stub_ops,
137			       &dma_fence_stub_lock,
138			       0, 0);
139
140		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
141			&dma_fence_stub.flags);
142
143		dma_fence_signal_locked(&dma_fence_stub);
144	}
145	spin_unlock(&dma_fence_stub_lock);
146
147	return dma_fence_get(&dma_fence_stub);
148}
149EXPORT_SYMBOL(dma_fence_get_stub);
150
151/**
152 * dma_fence_allocate_private_stub - return a private, signaled fence
 
153 *
154 * Return a newly allocated and signaled stub fence.
155 */
156struct dma_fence *dma_fence_allocate_private_stub(void)
157{
158	struct dma_fence *fence;
159
160	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
161	if (fence == NULL)
162		return ERR_PTR(-ENOMEM);
163
164	dma_fence_init(fence,
165		       &dma_fence_stub_ops,
166		       &dma_fence_stub_lock,
167		       0, 0);
168
169	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
170		&fence->flags);
171
172	dma_fence_signal(fence);
173
174	return fence;
175}
176EXPORT_SYMBOL(dma_fence_allocate_private_stub);
177
178/**
179 * dma_fence_context_alloc - allocate an array of fence contexts
180 * @num: amount of contexts to allocate
181 *
182 * This function will return the first index of the number of fence contexts
183 * allocated.  The fence context is used for setting &dma_fence.context to a
184 * unique number by passing the context to dma_fence_init().
185 */
186u64 dma_fence_context_alloc(unsigned num)
187{
188	WARN_ON(!num);
189	return atomic64_fetch_add(num, &dma_fence_context_counter);
190}
191EXPORT_SYMBOL(dma_fence_context_alloc);
192
193/**
194 * DOC: fence signalling annotation
195 *
196 * Proving correctness of all the kernel code around &dma_fence through code
197 * review and testing is tricky for a few reasons:
198 *
199 * * It is a cross-driver contract, and therefore all drivers must follow the
200 *   same rules for lock nesting order, calling contexts for various functions
201 *   and anything else significant for in-kernel interfaces. But it is also
202 *   impossible to test all drivers in a single machine, hence brute-force N vs.
203 *   N testing of all combinations is impossible. Even just limiting to the
204 *   possible combinations is infeasible.
205 *
206 * * There is an enormous amount of driver code involved. For render drivers
207 *   there's the tail of command submission, after fences are published,
208 *   scheduler code, interrupt and workers to process job completion,
209 *   and timeout, gpu reset and gpu hang recovery code. Plus for integration
210 *   with core mm with have &mmu_notifier, respectively &mmu_interval_notifier,
211 *   and &shrinker. For modesetting drivers there's the commit tail functions
212 *   between when fences for an atomic modeset are published, and when the
213 *   corresponding vblank completes, including any interrupt processing and
214 *   related workers. Auditing all that code, across all drivers, is not
215 *   feasible.
216 *
217 * * Due to how many other subsystems are involved and the locking hierarchies
218 *   this pulls in there is extremely thin wiggle-room for driver-specific
219 *   differences. &dma_fence interacts with almost all of the core memory
220 *   handling through page fault handlers via &dma_resv, dma_resv_lock() and
221 *   dma_resv_unlock(). On the other side it also interacts through all
222 *   allocation sites through &mmu_notifier and &shrinker.
223 *
224 * Furthermore lockdep does not handle cross-release dependencies, which means
225 * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
226 * at runtime with some quick testing. The simplest example is one thread
227 * waiting on a &dma_fence while holding a lock::
228 *
229 *     lock(A);
230 *     dma_fence_wait(B);
231 *     unlock(A);
232 *
233 * while the other thread is stuck trying to acquire the same lock, which
234 * prevents it from signalling the fence the previous thread is stuck waiting
235 * on::
236 *
237 *     lock(A);
238 *     unlock(A);
239 *     dma_fence_signal(B);
240 *
241 * By manually annotating all code relevant to signalling a &dma_fence we can
242 * teach lockdep about these dependencies, which also helps with the validation
243 * headache since now lockdep can check all the rules for us::
244 *
245 *    cookie = dma_fence_begin_signalling();
246 *    lock(A);
247 *    unlock(A);
248 *    dma_fence_signal(B);
249 *    dma_fence_end_signalling(cookie);
250 *
251 * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
252 * annotate critical sections the following rules need to be observed:
253 *
254 * * All code necessary to complete a &dma_fence must be annotated, from the
255 *   point where a fence is accessible to other threads, to the point where
256 *   dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
257 *   and due to the very strict rules and many corner cases it is infeasible to
258 *   catch these just with review or normal stress testing.
259 *
260 * * &struct dma_resv deserves a special note, since the readers are only
261 *   protected by rcu. This means the signalling critical section starts as soon
262 *   as the new fences are installed, even before dma_resv_unlock() is called.
263 *
264 * * The only exception are fast paths and opportunistic signalling code, which
265 *   calls dma_fence_signal() purely as an optimization, but is not required to
266 *   guarantee completion of a &dma_fence. The usual example is a wait IOCTL
267 *   which calls dma_fence_signal(), while the mandatory completion path goes
268 *   through a hardware interrupt and possible job completion worker.
269 *
270 * * To aid composability of code, the annotations can be freely nested, as long
271 *   as the overall locking hierarchy is consistent. The annotations also work
272 *   both in interrupt and process context. Due to implementation details this
273 *   requires that callers pass an opaque cookie from
274 *   dma_fence_begin_signalling() to dma_fence_end_signalling().
275 *
276 * * Validation against the cross driver contract is implemented by priming
277 *   lockdep with the relevant hierarchy at boot-up. This means even just
278 *   testing with a single device is enough to validate a driver, at least as
279 *   far as deadlocks with dma_fence_wait() against dma_fence_signal() are
280 *   concerned.
281 */
282#ifdef CONFIG_LOCKDEP
283static struct lockdep_map dma_fence_lockdep_map = {
284	.name = "dma_fence_map"
285};
286
287/**
288 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
289 *
290 * Drivers should use this to annotate the beginning of any code section
291 * required to eventually complete &dma_fence by calling dma_fence_signal().
292 *
293 * The end of these critical sections are annotated with
294 * dma_fence_end_signalling().
295 *
296 * Returns:
297 *
298 * Opaque cookie needed by the implementation, which needs to be passed to
299 * dma_fence_end_signalling().
300 */
301bool dma_fence_begin_signalling(void)
302{
303	/* explicitly nesting ... */
304	if (lock_is_held_type(&dma_fence_lockdep_map, 1))
305		return true;
306
307	/* rely on might_sleep check for soft/hardirq locks */
308	if (in_atomic())
309		return true;
310
311	/* ... and non-recursive readlock */
312	lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);
313
314	return false;
315}
316EXPORT_SYMBOL(dma_fence_begin_signalling);
317
318/**
319 * dma_fence_end_signalling - end a critical DMA fence signalling section
320 * @cookie: opaque cookie from dma_fence_begin_signalling()
321 *
322 * Closes a critical section annotation opened by dma_fence_begin_signalling().
323 */
324void dma_fence_end_signalling(bool cookie)
325{
326	if (cookie)
327		return;
328
329	lock_release(&dma_fence_lockdep_map, _RET_IP_);
330}
331EXPORT_SYMBOL(dma_fence_end_signalling);
332
333void __dma_fence_might_wait(void)
334{
335	bool tmp;
336
337	tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
338	if (tmp)
339		lock_release(&dma_fence_lockdep_map, _THIS_IP_);
340	lock_map_acquire(&dma_fence_lockdep_map);
341	lock_map_release(&dma_fence_lockdep_map);
342	if (tmp)
343		lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
344}
345#endif
346
347
348/**
349 * dma_fence_signal_timestamp_locked - signal completion of a fence
350 * @fence: the fence to signal
351 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
352 *
353 * Signal completion for software callbacks on a fence, this will unblock
354 * dma_fence_wait() calls and run all the callbacks added with
355 * dma_fence_add_callback(). Can be called multiple times, but since a fence
356 * can only go from the unsignaled to the signaled state and not back, it will
357 * only be effective the first time. Set the timestamp provided as the fence
358 * signal timestamp.
359 *
360 * Unlike dma_fence_signal_timestamp(), this function must be called with
361 * &dma_fence.lock held.
362 *
363 * Returns 0 on success and a negative error value when @fence has been
364 * signalled already.
365 */
366int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
367				      ktime_t timestamp)
368{
369	struct dma_fence_cb *cur, *tmp;
370	struct list_head cb_list;
371
372	lockdep_assert_held(fence->lock);
373
374	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
375				      &fence->flags)))
376		return -EINVAL;
377
378	/* Stash the cb_list before replacing it with the timestamp */
379	list_replace(&fence->cb_list, &cb_list);
380
381	fence->timestamp = timestamp;
382	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
383	trace_dma_fence_signaled(fence);
384
385	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
386		INIT_LIST_HEAD(&cur->node);
387		cur->func(fence, cur);
388	}
389
390	return 0;
391}
392EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
393
394/**
395 * dma_fence_signal_timestamp - signal completion of a fence
396 * @fence: the fence to signal
397 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
398 *
399 * Signal completion for software callbacks on a fence, this will unblock
400 * dma_fence_wait() calls and run all the callbacks added with
401 * dma_fence_add_callback(). Can be called multiple times, but since a fence
402 * can only go from the unsignaled to the signaled state and not back, it will
403 * only be effective the first time. Set the timestamp provided as the fence
404 * signal timestamp.
405 *
406 * Returns 0 on success and a negative error value when @fence has been
407 * signalled already.
408 */
409int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
410{
411	unsigned long flags;
412	int ret;
413
414	if (!fence)
415		return -EINVAL;
416
417	spin_lock_irqsave(fence->lock, flags);
418	ret = dma_fence_signal_timestamp_locked(fence, timestamp);
419	spin_unlock_irqrestore(fence->lock, flags);
420
421	return ret;
422}
423EXPORT_SYMBOL(dma_fence_signal_timestamp);
424
425/**
426 * dma_fence_signal_locked - signal completion of a fence
427 * @fence: the fence to signal
428 *
429 * Signal completion for software callbacks on a fence, this will unblock
430 * dma_fence_wait() calls and run all the callbacks added with
431 * dma_fence_add_callback(). Can be called multiple times, but since a fence
432 * can only go from the unsignaled to the signaled state and not back, it will
433 * only be effective the first time.
434 *
435 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
436 * held.
437 *
438 * Returns 0 on success and a negative error value when @fence has been
439 * signalled already.
440 */
441int dma_fence_signal_locked(struct dma_fence *fence)
442{
443	return dma_fence_signal_timestamp_locked(fence, ktime_get());
444}
445EXPORT_SYMBOL(dma_fence_signal_locked);
446
447/**
448 * dma_fence_signal - signal completion of a fence
449 * @fence: the fence to signal
450 *
451 * Signal completion for software callbacks on a fence, this will unblock
452 * dma_fence_wait() calls and run all the callbacks added with
453 * dma_fence_add_callback(). Can be called multiple times, but since a fence
454 * can only go from the unsignaled to the signaled state and not back, it will
455 * only be effective the first time.
456 *
457 * Returns 0 on success and a negative error value when @fence has been
458 * signalled already.
459 */
460int dma_fence_signal(struct dma_fence *fence)
461{
462	unsigned long flags;
463	int ret;
464	bool tmp;
465
466	if (!fence)
467		return -EINVAL;
468
469	tmp = dma_fence_begin_signalling();
470
471	spin_lock_irqsave(fence->lock, flags);
472	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
473	spin_unlock_irqrestore(fence->lock, flags);
474
475	dma_fence_end_signalling(tmp);
476
477	return ret;
478}
479EXPORT_SYMBOL(dma_fence_signal);
480
481/**
482 * dma_fence_wait_timeout - sleep until the fence gets signaled
483 * or until timeout elapses
484 * @fence: the fence to wait on
485 * @intr: if true, do an interruptible wait
486 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
487 *
488 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
489 * remaining timeout in jiffies on success. Other error values may be
490 * returned on custom implementations.
491 *
492 * Performs a synchronous wait on this fence. It is assumed the caller
493 * directly or indirectly (buf-mgr between reservation and committing)
494 * holds a reference to the fence, otherwise the fence might be
495 * freed before return, resulting in undefined behavior.
496 *
497 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
498 */
499signed long
500dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
501{
502	signed long ret;
503
504	if (WARN_ON(timeout < 0))
505		return -EINVAL;
506
507	might_sleep();
508
509	__dma_fence_might_wait();
510
511	dma_fence_enable_sw_signaling(fence);
512
513	trace_dma_fence_wait_start(fence);
514	if (fence->ops->wait)
515		ret = fence->ops->wait(fence, intr, timeout);
516	else
517		ret = dma_fence_default_wait(fence, intr, timeout);
518	trace_dma_fence_wait_end(fence);
519	return ret;
520}
521EXPORT_SYMBOL(dma_fence_wait_timeout);
522
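/*
 * Example usage (illustrative sketch, not part of this file): wait up
 * to 100ms for a fence. A return of 0 means the wait timed out, a
 * negative value is an error (e.g. -ERESTARTSYS), and a positive value
 * is the remaining timeout after the fence signaled.
 *
 *	signed long ret;
 *
 *	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */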
523/**
524 * dma_fence_release - default release function for fences
525 * @kref: &dma_fence.refcount
526 *
527 * This is the default release function for &dma_fence. Drivers shouldn't call
528 * this directly, but instead call dma_fence_put().
529 */
530void dma_fence_release(struct kref *kref)
531{
532	struct dma_fence *fence =
533		container_of(kref, struct dma_fence, refcount);
534
535	trace_dma_fence_destroy(fence);
536
537	if (WARN(!list_empty(&fence->cb_list) &&
538		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
539		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
540		 fence->ops->get_driver_name(fence),
541		 fence->ops->get_timeline_name(fence),
542		 fence->context, fence->seqno)) {
543		unsigned long flags;
544
545		/*
546		 * Failed to signal before release, likely a refcounting issue.
547		 *
548		 * This should never happen, but if it does make sure that we
549		 * don't leave chains dangling. We set the error flag first
550		 * so that the callbacks know this signal is due to an error.
551		 */
552		spin_lock_irqsave(fence->lock, flags);
553		fence->error = -EDEADLK;
554		dma_fence_signal_locked(fence);
555		spin_unlock_irqrestore(fence->lock, flags);
556	}
557
558	if (fence->ops->release)
559		fence->ops->release(fence);
560	else
561		dma_fence_free(fence);
562}
563EXPORT_SYMBOL(dma_fence_release);
564
565/**
566 * dma_fence_free - default release function for &dma_fence.
567 * @fence: fence to release
568 *
569 * This is the default implementation for &dma_fence_ops.release. It calls
570 * kfree_rcu() on @fence.
571 */
572void dma_fence_free(struct dma_fence *fence)
573{
574	kfree_rcu(fence, rcu);
575}
576EXPORT_SYMBOL(dma_fence_free);
577
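/*
 * Example (illustrative sketch, not part of this file): a fence
 * embedded in a larger driver object needs a custom
 * &dma_fence_ops.release that frees the enclosing object; kfree_rcu()
 * on the embedded rcu head mirrors what dma_fence_free() does for a
 * bare fence. The foo_* names are assumptions made for the sketch, and
 * foo_fence_release would be hooked up as .release in the ops.
 *
 *	struct foo_fence {
 *		struct dma_fence base;
 *		struct foo_ring *ring;
 *	};
 *
 *	static void foo_fence_release(struct dma_fence *f)
 *	{
 *		struct foo_fence *ff = container_of(f, struct foo_fence, base);
 *
 *		kfree_rcu(ff, base.rcu);
 *	}
 */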
578static bool __dma_fence_enable_signaling(struct dma_fence *fence)
579{
580	bool was_set;
581
582	lockdep_assert_held(fence->lock);
583
584	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
585				   &fence->flags);
586
587	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
588		return false;
589
590	if (!was_set && fence->ops->enable_signaling) {
591		trace_dma_fence_enable_signal(fence);
592
593		if (!fence->ops->enable_signaling(fence)) {
594			dma_fence_signal_locked(fence);
595			return false;
596		}
597	}
598
599	return true;
600}
601
602/**
603 * dma_fence_enable_sw_signaling - enable signaling on fence
604 * @fence: the fence to enable
605 *
606 * This will request sw signaling to be enabled, to make the fence
607 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
608 * internally.
609 */
610void dma_fence_enable_sw_signaling(struct dma_fence *fence)
611{
612	unsigned long flags;
613
614	spin_lock_irqsave(fence->lock, flags);
615	__dma_fence_enable_signaling(fence);
616	spin_unlock_irqrestore(fence->lock, flags);
617}
618EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
619
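/*
 * Example usage (illustrative sketch, not part of this file): a driver
 * handing a fence to a consumer that only polls
 * dma_fence_is_signaled() can enable signaling up front, so the fence
 * is completed from the driver's interrupt path rather than relying on
 * a blocking wait to turn signaling on first.
 *
 *	dma_fence_enable_sw_signaling(fence);
 */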
620/**
621 * dma_fence_add_callback - add a callback to be called when the fence
622 * is signaled
623 * @fence: the fence to wait on
624 * @cb: the callback to register
625 * @func: the function to call
626 *
627 * Add a software callback to the fence. The caller should keep a reference to
628 * the fence.
629 *
630 * @cb will be initialized by dma_fence_add_callback(); no initialization
631 * by the caller is required. Any number of callbacks can be registered
632 * to a fence, but a callback can only be registered to one fence at a time.
633 *
634 * If fence is already signaled, this function will return -ENOENT (and
635 * *not* call the callback).
636 *
637 * Note that the callback can be called from an atomic context or irq context.
638 *
639 * Returns 0 in case of success, -ENOENT if the fence is already signaled
640 * and -EINVAL in case of error.
641 */
642int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
643			   dma_fence_func_t func)
644{
645	unsigned long flags;
646	int ret = 0;
647
648	if (WARN_ON(!fence || !func))
649		return -EINVAL;
650
651	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
652		INIT_LIST_HEAD(&cb->node);
653		return -ENOENT;
654	}
655
656	spin_lock_irqsave(fence->lock, flags);
657
658	if (__dma_fence_enable_signaling(fence)) {
659		cb->func = func;
660		list_add_tail(&cb->node, &fence->cb_list);
661	} else {
662		INIT_LIST_HEAD(&cb->node);
663		ret = -ENOENT;
664	}
665
666	spin_unlock_irqrestore(fence->lock, flags);
667
668	return ret;
669}
670EXPORT_SYMBOL(dma_fence_add_callback);
671
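/*
 * Example usage (illustrative sketch, not part of this file): embed
 * the &dma_fence_cb in the structure that needs the notification and
 * recover it with container_of() in the callback; remember the
 * callback may run in irq context. The foo_* names are assumptions
 * made for the sketch.
 *
 *	struct foo_waiter {
 *		struct dma_fence_cb cb;
 *		struct completion done;
 *	};
 *
 *	static void foo_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
 *	{
 *		struct foo_waiter *w = container_of(cb, struct foo_waiter, cb);
 *
 *		complete(&w->done);
 *	}
 *
 *	int ret;
 *
 *	init_completion(&w->done);
 *	ret = dma_fence_add_callback(fence, &w->cb, foo_fence_cb);
 *	if (ret == -ENOENT)
 *		complete(&w->done);
 */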
672/**
673 * dma_fence_get_status - returns the status upon completion
674 * @fence: the dma_fence to query
675 *
676 * This wraps dma_fence_get_status_locked() to return the error status
677 * condition on a signaled fence. See dma_fence_get_status_locked() for more
678 * details.
679 *
680 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
681 * been signaled without an error condition, or a negative error code
682 * if the fence has been completed in err.
683 */
684int dma_fence_get_status(struct dma_fence *fence)
685{
686	unsigned long flags;
687	int status;
688
689	spin_lock_irqsave(fence->lock, flags);
690	status = dma_fence_get_status_locked(fence);
691	spin_unlock_irqrestore(fence->lock, flags);
692
693	return status;
694}
695EXPORT_SYMBOL(dma_fence_get_status);
696
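/*
 * Example usage (illustrative sketch, not part of this file): 0 means
 * the fence is still pending, 1 means it completed successfully, and a
 * negative value means it completed with an error.
 *
 *	int status = dma_fence_get_status(fence);
 *
 *	if (status < 0)
 *		pr_err("fence completed in error: %d\n", status);
 */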
697/**
698 * dma_fence_remove_callback - remove a callback from the signaling list
699 * @fence: the fence to wait on
700 * @cb: the callback to remove
701 *
702 * Remove a previously queued callback from the fence. This function returns
703 * true if the callback is successfully removed, or false if the fence has
704 * already been signaled.
705 *
706 * *WARNING*:
707 * Cancelling a callback should only be done if you really know what you're
708 * doing, since deadlocks and race conditions could occur all too easily. For
709 * this reason, it should only ever be done on hardware lockup recovery,
710 * with a reference held to the fence.
711 *
712 * Behaviour is undefined if @cb has not been added to @fence using
713 * dma_fence_add_callback() beforehand.
714 */
715bool
716dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
717{
718	unsigned long flags;
719	bool ret;
720
721	spin_lock_irqsave(fence->lock, flags);
722
723	ret = !list_empty(&cb->node);
724	if (ret)
725		list_del_init(&cb->node);
726
727	spin_unlock_irqrestore(fence->lock, flags);
728
729	return ret;
730}
731EXPORT_SYMBOL(dma_fence_remove_callback);
732
733struct default_wait_cb {
734	struct dma_fence_cb base;
735	struct task_struct *task;
736};
737
738static void
739dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
740{
741	struct default_wait_cb *wait =
742		container_of(cb, struct default_wait_cb, base);
743
744	wake_up_state(wait->task, TASK_NORMAL);
745}
746
747/**
748 * dma_fence_default_wait - default sleep until the fence gets signaled
749 * or until timeout elapses
750 * @fence: the fence to wait on
751 * @intr: if true, do an interruptible wait
752 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
753 *
754 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
755 * remaining timeout in jiffies on success. If @timeout is zero, one is
756 * returned if the fence is already signaled, for consistency with other
757 * functions taking a jiffies timeout.
758 */
759signed long
760dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
761{
762	struct default_wait_cb cb;
763	unsigned long flags;
764	signed long ret = timeout ? timeout : 1;
765
766	spin_lock_irqsave(fence->lock, flags);
767
768	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
769		goto out;
770
771	if (intr && signal_pending(current)) {
772		ret = -ERESTARTSYS;
773		goto out;
774	}
775
776	if (!timeout) {
777		ret = 0;
778		goto out;
779	}
780
781	cb.base.func = dma_fence_default_wait_cb;
782	cb.task = current;
783	list_add(&cb.base.node, &fence->cb_list);
784
785	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
786		if (intr)
787			__set_current_state(TASK_INTERRUPTIBLE);
788		else
789			__set_current_state(TASK_UNINTERRUPTIBLE);
790		spin_unlock_irqrestore(fence->lock, flags);
791
792		ret = schedule_timeout(ret);
793
794		spin_lock_irqsave(fence->lock, flags);
795		if (ret > 0 && intr && signal_pending(current))
796			ret = -ERESTARTSYS;
797	}
798
799	if (!list_empty(&cb.base.node))
800		list_del(&cb.base.node);
801	__set_current_state(TASK_RUNNING);
802
803out:
804	spin_unlock_irqrestore(fence->lock, flags);
805	return ret;
806}
807EXPORT_SYMBOL(dma_fence_default_wait);
808
809static bool
810dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
811			    uint32_t *idx)
812{
813	int i;
814
815	for (i = 0; i < count; ++i) {
816		struct dma_fence *fence = fences[i];
817		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
818			if (idx)
819				*idx = i;
820			return true;
821		}
822	}
823	return false;
824}
825
826/**
827 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
828 * or until timeout elapses
829 * @fences: array of fences to wait on
830 * @count: number of fences to wait on
831 * @intr: if true, do an interruptible wait
832 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
833 * @idx: used to store the first signaled fence index, meaningful only on
834 *	positive return
835 *
836 * Returns -EINVAL if @fences or @count is invalid, -ERESTARTSYS if
837 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
838 * on success.
839 *
840 * Synchronously waits for the first fence in the array to be signaled. The
841 * caller needs to hold a reference to all fences in the array, otherwise a
842 * fence might be freed before return, resulting in undefined behavior.
843 *
844 * See also dma_fence_wait() and dma_fence_wait_timeout().
845 */
846signed long
847dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
848			   bool intr, signed long timeout, uint32_t *idx)
849{
850	struct default_wait_cb *cb;
851	signed long ret = timeout;
852	unsigned i;
853
854	if (WARN_ON(!fences || !count || timeout < 0))
855		return -EINVAL;
856
857	if (timeout == 0) {
858		for (i = 0; i < count; ++i)
859			if (dma_fence_is_signaled(fences[i])) {
860				if (idx)
861					*idx = i;
862				return 1;
863			}
864
865		return 0;
866	}
867
868	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
869	if (cb == NULL) {
870		ret = -ENOMEM;
871		goto err_free_cb;
872	}
873
874	for (i = 0; i < count; ++i) {
875		struct dma_fence *fence = fences[i];
876
877		cb[i].task = current;
878		if (dma_fence_add_callback(fence, &cb[i].base,
879					   dma_fence_default_wait_cb)) {
880			/* This fence is already signaled */
881			if (idx)
882				*idx = i;
883			goto fence_rm_cb;
884		}
885	}
886
887	while (ret > 0) {
888		if (intr)
889			set_current_state(TASK_INTERRUPTIBLE);
890		else
891			set_current_state(TASK_UNINTERRUPTIBLE);
892
893		if (dma_fence_test_signaled_any(fences, count, idx))
894			break;
895
896		ret = schedule_timeout(ret);
897
898		if (ret > 0 && intr && signal_pending(current))
899			ret = -ERESTARTSYS;
900	}
901
902	__set_current_state(TASK_RUNNING);
903
904fence_rm_cb:
905	while (i-- > 0)
906		dma_fence_remove_callback(fences[i], &cb[i].base);
907
908err_free_cb:
909	kfree(cb);
910
911	return ret;
912}
913EXPORT_SYMBOL(dma_fence_wait_any_timeout);
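/*
 * Example usage (illustrative sketch, not part of this file): wait up
 * to one second for whichever of two fences signals first; @idx is
 * only meaningful when the return value is positive.
 *
 *	struct dma_fence *fences[] = { fence_a, fence_b };
 *	uint32_t first;
 *	signed long ret;
 *
 *	ret = dma_fence_wait_any_timeout(fences, ARRAY_SIZE(fences), true,
 *					 msecs_to_jiffies(1000), &first);
 *	if (ret > 0)
 *		pr_debug("fence %u signaled first\n", first);
 */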
914
915/**
916 * dma_fence_describe - Dump fence description into seq_file
917 * @fence: the fence to describe
918 * @seq: the seq_file to put the textual description into
919 *
920 * Dump a textual description of the fence and its state into the seq_file.
921 */
922void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
923{
924	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
925		   fence->ops->get_driver_name(fence),
926		   fence->ops->get_timeline_name(fence), fence->seqno,
927		   dma_fence_is_signaled(fence) ? "" : "un");
928}
929EXPORT_SYMBOL(dma_fence_describe);
930
931/**
932 * dma_fence_init - Initialize a custom fence.
933 * @fence: the fence to initialize
934 * @ops: the dma_fence_ops for operations on this fence
935 * @lock: the irqsafe spinlock to use for locking this fence
936 * @context: the execution context this fence is run on
937 * @seqno: a linearly increasing sequence number for this context
938 *
939 * Initializes an allocated fence. The caller doesn't have to keep a
940 * reference after committing with this fence, but will need to hold one
941 * again if &dma_fence_ops.enable_signaling gets called.
942 *
943 * context and seqno are used for easy comparison between fences, making it
944 * possible to check which fence is later by simply using dma_fence_later().
945 */
946void
947dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
948	       spinlock_t *lock, u64 context, u64 seqno)
949{
950	BUG_ON(!lock);
951	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
952
953	kref_init(&fence->refcount);
954	fence->ops = ops;
955	INIT_LIST_HEAD(&fence->cb_list);
956	fence->lock = lock;
957	fence->context = context;
958	fence->seqno = seqno;
959	fence->flags = 0UL;
960	fence->error = 0;
961
962	trace_dma_fence_init(fence);
963}
964EXPORT_SYMBOL(dma_fence_init);
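/*
 * Example (illustrative sketch, not part of this file): the minimal
 * driver side of a fence provides the two mandatory name callbacks,
 * allocates a context once per timeline, and hands out increasing
 * seqnos on it. The foo_* and ring names are assumptions made for the
 * sketch.
 *
 *	static const char *foo_driver_name(struct dma_fence *f)
 *	{
 *		return "foo";
 *	}
 *
 *	static const char *foo_timeline_name(struct dma_fence *f)
 *	{
 *		return "foo-ring0";
 *	}
 *
 *	static const struct dma_fence_ops foo_fence_ops = {
 *		.get_driver_name = foo_driver_name,
 *		.get_timeline_name = foo_timeline_name,
 *	};
 *
 *	Once per engine/timeline:
 *
 *	ring->context = dma_fence_context_alloc(1);
 *	spin_lock_init(&ring->fence_lock);
 *
 *	Per submission:
 *
 *	dma_fence_init(&f->base, &foo_fence_ops, &ring->fence_lock,
 *		       ring->context, ++ring->seqno);
 */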