drivers/gpu/drm/drm_vblank_work.c (Linux v6.8)
// SPDX-License-Identifier: MIT

#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <drm/drm_crtc.h>

#include "drm_internal.h"

/**
 * DOC: vblank works
 *
 * Many DRM drivers need to program hardware in a time-sensitive manner, often
 * with a deadline of starting and finishing within a certain region of the
 * scanout. Most of the time the safest way to accomplish this is to simply do
 * said time-sensitive programming in the driver's IRQ handler, which allows
 * drivers to avoid being preempted during these critical regions. Better
 * still, the hardware may handle applying such time-critical programming
 * independently of the CPU.
 *
 * While a decent amount of hardware is designed so that the CPU doesn't need
 * to be concerned with extremely time-sensitive programming, there are a few
 * situations where it can't be helped. Some unforgiving hardware may require
 * that certain time-sensitive programming be handled completely by the CPU,
 * and said programming may even take too long to handle in an IRQ handler.
 * Another such situation is where the driver needs to perform a task that
 * must complete within a specific scanout period, but might block and thus
 * cannot be handled in IRQ context. Neither situation can be solved perfectly
 * in Linux since we're not a realtime kernel, and thus the scheduler may
 * cause us to miss our deadline if it decides to preempt us. But for some
 * drivers, it's good enough if we can lower our chance of being preempted to
 * an absolute minimum.
 *
 * This is where &drm_vblank_work comes in. &drm_vblank_work provides a
 * simple, generic delayed-work implementation which delays work execution
 * until a particular vblank has passed, and then executes the work at
 * realtime priority. This provides the best possible chance at performing
 * time-sensitive hardware programming on time, even when the system is under
 * heavy load. &drm_vblank_work also supports rescheduling, so that
 * self-re-arming work items can be easily implemented.
 */

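/*
 * A minimal usage sketch (illustrative, not part of this file): a driver
 * initializes a work item once, then schedules it against an upcoming
 * vblank. The names my_work and my_work_fn are hypothetical; crtc is
 * assumed to be a driver-owned &struct drm_crtc pointer.
 *
 *	static struct drm_vblank_work my_work;
 *
 *	static void my_work_fn(struct kthread_work *base)
 *	{
 *		struct drm_vblank_work *work =
 *			container_of(base, struct drm_vblank_work, base);
 *
 *		(runs at realtime priority once the target vblank has passed)
 *	}
 *
 *	drm_vblank_work_init(&my_work, crtc, my_work_fn);
 *	drm_vblank_work_schedule(&my_work,
 *				 drm_crtc_vblank_count(crtc) + 1, true);
 */
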
void drm_handle_vblank_works(struct drm_vblank_crtc *vblank)
{
	struct drm_vblank_work *work, *next;
	u64 count = atomic64_read(&vblank->count);
	bool wake = false;

	assert_spin_locked(&vblank->dev->event_lock);

	list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
		if (!drm_vblank_passed(count, work->count))
			continue;

		list_del_init(&work->node);
		drm_vblank_put(vblank->dev, vblank->pipe);
		kthread_queue_work(vblank->worker, &work->base);
		wake = true;
	}
	if (wake)
		wake_up_all(&vblank->work_wait_queue);
}

/* Handle cancelling any pending vblank work items and drop respective vblank
 * references in response to vblank interrupts being disabled.
 */
void drm_vblank_cancel_pending_works(struct drm_vblank_crtc *vblank)
{
	struct drm_vblank_work *work, *next;

	assert_spin_locked(&vblank->dev->event_lock);

	drm_WARN_ONCE(vblank->dev, !list_empty(&vblank->pending_work),
		      "Cancelling pending vblank works!\n");

	list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
		list_del_init(&work->node);
		drm_vblank_put(vblank->dev, vblank->pipe);
	}

	wake_up_all(&vblank->work_wait_queue);
}

/**
 * drm_vblank_work_schedule - schedule a vblank work
 * @work: vblank work to schedule
 * @count: target vblank count
 * @nextonmiss: defer until the next vblank if target vblank was missed
 *
 * Schedule @work for execution once the crtc vblank count reaches @count.
 *
 * If the crtc vblank count has already reached @count and @nextonmiss is
 * %false the work starts to execute immediately.
 *
 * If the crtc vblank count has already reached @count and @nextonmiss is
 * %true the work is deferred until the next vblank (as if @count has been
 * specified as crtc vblank count + 1).
 *
 * If @work is already scheduled, this function will reschedule said work
 * using the new @count. This can be used for self-rearming work items.
 *
 * Returns:
 * %1 if @work was successfully (re)scheduled, %0 if it was either already
 * scheduled or cancelled, or a negative error code on failure.
 */
int drm_vblank_work_schedule(struct drm_vblank_work *work,
			     u64 count, bool nextonmiss)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;
	u64 cur_vbl;
	unsigned long irqflags;
	bool passed, inmodeset, rescheduling = false, wake = false;
	int ret = 0;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	if (work->cancelling)
		goto out;

	spin_lock(&dev->vbl_lock);
	inmodeset = vblank->inmodeset;
	spin_unlock(&dev->vbl_lock);
	if (inmodeset)
		goto out;

	if (list_empty(&work->node)) {
		ret = drm_vblank_get(dev, vblank->pipe);
		if (ret < 0)
			goto out;
	} else if (work->count == count) {
		/* Already scheduled w/ same vbl count */
		goto out;
	} else {
		rescheduling = true;
	}

	work->count = count;
	cur_vbl = drm_vblank_count(dev, vblank->pipe);
	passed = drm_vblank_passed(cur_vbl, count);
	if (passed)
		drm_dbg_core(dev,
			     "crtc %d vblank %llu already passed (current %llu)\n",
			     vblank->pipe, count, cur_vbl);

	if (!nextonmiss && passed) {
		drm_vblank_put(dev, vblank->pipe);
		ret = kthread_queue_work(vblank->worker, &work->base);

		if (rescheduling) {
			list_del_init(&work->node);
			wake = true;
		}
	} else {
		if (!rescheduling)
			list_add_tail(&work->node, &vblank->pending_work);
		ret = true;
	}

out:
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
	if (wake)
		wake_up_all(&vblank->work_wait_queue);
	return ret;
}
EXPORT_SYMBOL(drm_vblank_work_schedule);

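/*
 * A sketch of a self-re-arming work item, as mentioned in the DOC comment
 * above (illustrative only; my_rearming_fn is hypothetical). Since
 * drm_vblank_work_schedule() may be called from the work function itself,
 * an item can re-arm against the next vblank on each invocation:
 *
 *	static void my_rearming_fn(struct kthread_work *base)
 *	{
 *		struct drm_vblank_work *work =
 *			container_of(base, struct drm_vblank_work, base);
 *		struct drm_vblank_crtc *vblank = work->vblank;
 *
 *		(time-sensitive programming goes here)
 *
 *		drm_vblank_work_schedule(work,
 *					 drm_vblank_count(vblank->dev,
 *							  vblank->pipe) + 1,
 *					 true);
 *	}
 */
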
/**
 * drm_vblank_work_cancel_sync - cancel a vblank work and wait for it to
 * finish executing
 * @work: vblank work to cancel
 *
 * Cancel an already scheduled vblank work and wait for its
 * execution to finish.
 *
 * On return, @work is guaranteed to no longer be scheduled or running, even
 * if it's self-arming.
 *
 * Returns:
 * %True if the work was cancelled before it started to execute, %false
 * otherwise.
 */
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;
	bool ret = false;

	spin_lock_irq(&dev->event_lock);
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		drm_vblank_put(vblank->dev, vblank->pipe);
		ret = true;
	}

	work->cancelling++;
	spin_unlock_irq(&dev->event_lock);

	wake_up_all(&vblank->work_wait_queue);

	if (kthread_cancel_work_sync(&work->base))
		ret = true;

	spin_lock_irq(&dev->event_lock);
	work->cancelling--;
	spin_unlock_irq(&dev->event_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vblank_work_cancel_sync);

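/*
 * Hypothetical call site (a sketch, not from this file): a driver tearing
 * down a CRTC would typically cancel its work item before disabling the
 * vblank machinery, e.g.:
 *
 *	drm_vblank_work_cancel_sync(&my_work);
 *	drm_crtc_vblank_off(crtc);
 */
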
/**
 * drm_vblank_work_flush - wait for a scheduled vblank work to finish
 * executing
 * @work: vblank work to flush
 *
 * Wait until @work has finished executing once.
 */
void drm_vblank_work_flush(struct drm_vblank_work *work)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;

	spin_lock_irq(&dev->event_lock);
	wait_event_lock_irq(vblank->work_wait_queue, list_empty(&work->node),
			    dev->event_lock);
	spin_unlock_irq(&dev->event_lock);

	kthread_flush_work(&work->base);
}
EXPORT_SYMBOL(drm_vblank_work_flush);

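/*
 * Hypothetical call site (a sketch, not from this file): flushing lets a
 * caller observe the effect of previously scheduled work before
 * continuing, e.g.:
 *
 *	drm_vblank_work_schedule(&my_work, target_vblank, true);
 *	drm_vblank_work_flush(&my_work);
 */
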
/**
 * drm_vblank_work_init - initialize a vblank work item
 * @work: vblank work item
 * @crtc: CRTC whose vblank will trigger the work execution
 * @func: work function to be executed
 *
 * Initialize a vblank work item for a specific crtc.
 */
void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
			  void (*func)(struct kthread_work *work))
{
	kthread_init_work(&work->base, func);
	INIT_LIST_HEAD(&work->node);
	work->vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
}
EXPORT_SYMBOL(drm_vblank_work_init);

int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
{
	struct kthread_worker *worker;

	INIT_LIST_HEAD(&vblank->pending_work);
	init_waitqueue_head(&vblank->work_wait_queue);
	worker = kthread_create_worker(0, "card%d-crtc%d",
				       vblank->dev->primary->index,
				       vblank->pipe);
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	vblank->worker = worker;

	sched_set_fifo(worker->task);
	return 0;
}