v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Syncpoint dma_fence implementation
 *
 * Copyright (c) 2020, NVIDIA Corporation.
 */

#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include "fence.h"
#include "intr.h"
#include "syncpt.h"

static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
{
	return "host1x";
}

static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
{
	return "syncpoint";
}

static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
{
	return container_of(f, struct host1x_syncpt_fence, base);
}

static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
		return false;

	/* Reference for interrupt path. */
	dma_fence_get(f);

	/*
	 * The dma_fence framework requires the fence driver to keep a
	 * reference to any fences for which 'enable_signaling' has been
	 * called (and that have not been signalled).
	 *
	 * We cannot currently always guarantee that all fences get signalled
	 * or cancelled. As such, for such situations, set up a timeout, so
	 * that long-lasting fences will get reaped eventually.
	 */
	if (sf->timeout) {
		/* Reference for timeout path. */
		dma_fence_get(f);
		schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
	}

	host1x_intr_add_fence_locked(sf->sp->host, sf);

	/*
	 * The fence may get signalled at any time after the above call,
	 * so we need to initialize all state used by signalling
	 * before it.
	 */

	return true;
}

static const struct dma_fence_ops host1x_syncpt_fence_ops = {
	.get_driver_name = host1x_syncpt_fence_get_driver_name,
	.get_timeline_name = host1x_syncpt_fence_get_timeline_name,
	.enable_signaling = host1x_syncpt_fence_enable_signaling,
};

void host1x_fence_signal(struct host1x_syncpt_fence *f)
{
	if (atomic_xchg(&f->signaling, 1)) {
		/*
		 * Already on timeout path, but we removed the fence before
		 * timeout path could, so drop interrupt path reference.
		 */
		dma_fence_put(&f->base);
		return;
	}

	if (f->timeout && cancel_delayed_work(&f->timeout_work)) {
		/*
		 * We know that the timeout path will not be entered.
		 * Safe to drop the timeout path's reference now.
		 */
		dma_fence_put(&f->base);
	}

	dma_fence_signal_locked(&f->base);
	dma_fence_put(&f->base);
}

static void do_fence_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = (struct delayed_work *)work;
	struct host1x_syncpt_fence *f =
		container_of(dwork, struct host1x_syncpt_fence, timeout_work);

	if (atomic_xchg(&f->signaling, 1)) {
		/* Already on interrupt path, drop timeout path reference if any. */
		if (f->timeout)
			dma_fence_put(&f->base);
		return;
	}

	if (host1x_intr_remove_fence(f->sp->host, f)) {
		/*
		 * Managed to remove fence from queue, so it's safe to drop
		 * the interrupt path's reference.
		 */
		dma_fence_put(&f->base);
	}

	dma_fence_set_error(&f->base, -ETIMEDOUT);
	dma_fence_signal(&f->base);
	if (f->timeout)
		dma_fence_put(&f->base);
}

struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
				      bool timeout)
{
	struct host1x_syncpt_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->sp = sp;
	fence->threshold = threshold;
	fence->timeout = timeout;

	dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &sp->fences.lock,
		       dma_fence_context_alloc(1), 0);

	INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);

	return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);

void host1x_fence_cancel(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	schedule_delayed_work(&sf->timeout_work, 0);
	flush_delayed_work(&sf->timeout_work);
}
EXPORT_SYMBOL(host1x_fence_cancel);
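
For context, a minimal usage sketch of the v6.13.7 API above (not part of the file): a hypothetical caller waiting on a syncpoint threshold through a fence. The helper name example_wait_syncpt and the 5 second wait are illustrative; 'sp' and 'threshold' are assumed to come from an already-submitted job.

static int example_wait_syncpt(struct host1x_syncpt *sp, u32 threshold)
{
	struct dma_fence *fence;
	long remaining;

	/* 'true' arms the 30 second timeout worker set up in enable_signaling */
	fence = host1x_fence_create(sp, threshold, true);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	remaining = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(5000));
	if (remaining == 0)
		host1x_fence_cancel(fence);	/* give up and force the timeout path to run now */

	dma_fence_put(fence);

	return remaining < 0 ? remaining : 0;
}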
v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Syncpoint dma_fence implementation
 *
 * Copyright (c) 2020, NVIDIA Corporation.
 */

#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include "fence.h"
#include "intr.h"
#include "syncpt.h"

static DEFINE_SPINLOCK(lock);

struct host1x_syncpt_fence {
	struct dma_fence base;

	atomic_t signaling;

	struct host1x_syncpt *sp;
	u32 threshold;

	struct host1x_waitlist *waiter;
	void *waiter_ref;

	struct delayed_work timeout_work;
};

static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
{
	return "host1x";
}

static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
{
	return "syncpoint";
}

static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
{
	return container_of(f, struct host1x_syncpt_fence, base);
}

static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);
	int err;

	if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
		return false;

	dma_fence_get(f);

	/*
	 * The dma_fence framework requires the fence driver to keep a
	 * reference to any fences for which 'enable_signaling' has been
	 * called (and that have not been signalled).
	 *
	 * We provide a userspace API to create arbitrary syncpoint fences,
	 * so we cannot normally guarantee that all fences get signalled.
	 * As such, setup a timeout, so that long-lasting fences will get
	 * reaped eventually.
	 */
	schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));

	err = host1x_intr_add_action(sf->sp->host, sf->sp, sf->threshold,
				     HOST1X_INTR_ACTION_SIGNAL_FENCE, f,
				     sf->waiter, &sf->waiter_ref);
	if (err) {
		cancel_delayed_work_sync(&sf->timeout_work);
		dma_fence_put(f);
		return false;
	}

	/* intr framework takes ownership of waiter */
	sf->waiter = NULL;

	/*
	 * The fence may get signalled at any time after the above call,
	 * so we need to initialize all state used by signalling
	 * before it.
	 */

	return true;
}

static void host1x_syncpt_fence_release(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	if (sf->waiter)
		kfree(sf->waiter);

	dma_fence_free(f);
}

static const struct dma_fence_ops host1x_syncpt_fence_ops = {
	.get_driver_name = host1x_syncpt_fence_get_driver_name,
	.get_timeline_name = host1x_syncpt_fence_get_timeline_name,
	.enable_signaling = host1x_syncpt_fence_enable_signaling,
	.release = host1x_syncpt_fence_release,
};

void host1x_fence_signal(struct host1x_syncpt_fence *f)
{
	if (atomic_xchg(&f->signaling, 1))
		return;

	/*
	 * Cancel pending timeout work - if it races, it will
	 * not get 'f->signaling' and return.
	 */
	cancel_delayed_work_sync(&f->timeout_work);

	host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, false);

	dma_fence_signal(&f->base);
	dma_fence_put(&f->base);
}

static void do_fence_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = (struct delayed_work *)work;
	struct host1x_syncpt_fence *f =
		container_of(dwork, struct host1x_syncpt_fence, timeout_work);

	if (atomic_xchg(&f->signaling, 1))
		return;

	/*
	 * Cancel pending timeout work - if it races, it will
	 * not get 'f->signaling' and return.
	 */
	host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, true);

	dma_fence_set_error(&f->base, -ETIMEDOUT);
	dma_fence_signal(&f->base);
	dma_fence_put(&f->base);
}

struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
{
	struct host1x_syncpt_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
	if (!fence->waiter) {
		kfree(fence);
		return ERR_PTR(-ENOMEM);
	}

	fence->sp = sp;
	fence->threshold = threshold;

	dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &lock,
		       dma_fence_context_alloc(1), 0);

	INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);

	return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);
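
For context, a minimal sketch of how the v6.2 two-argument host1x_fence_create() above might be consumed (not part of the file): exporting the fence to userspace as a sync_file fd, which is what the <linux/sync_file.h> include supports. The helper name example_export_syncpt_fence and the error-handling details are illustrative only.

static int example_export_syncpt_fence(struct host1x_syncpt *sp, u32 threshold)
{
	struct dma_fence *fence;
	struct sync_file *file;
	int fd;

	fence = host1x_fence_create(sp, threshold);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		dma_fence_put(fence);
		return fd;
	}

	file = sync_file_create(fence);
	dma_fence_put(fence);	/* sync_file takes its own reference */
	if (!file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, file->file);
	return fd;
}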