/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
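/*
 * Map a logical index into the circular buffer of pending events
 * backing a subscription; positions wrap around past sev->elems.
 */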
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}
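/*
 * Pop the oldest available event on this file handle, if any.
 * Takes fh->vdev->fh_lock internally and returns -ENOENT when the
 * available list is empty.
 */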
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}
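/*
 * Dequeue a single event, optionally blocking until one arrives.
 * In blocking mode the video_device lock (if any) is released while
 * waiting so other file operations can make progress.
 */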
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}
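/*
 * Queue one event on a single file handle; the caller must hold
 * fh->vdev->fh_lock. When the subscription's ring of kevents is full
 * the oldest entry is dropped, optionally letting the subscriber's
 * replace/merge ops preserve its payload.
 */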
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/*
	 * If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet, elems will be 0; treat this as
	 * not being subscribed.
	 */
	if (!sev->elems)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
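/* Queue an event on every file handle currently open on the device. */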
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	if (vdev == NULL)
		return;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
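/* Queue an event on one specific file handle only. */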
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
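/* Number of events ready to be dequeued on this file handle. */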
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
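/*
 * Subscribe a file handle to a (type, id) event pair. A ring of at
 * least one kevent is allocated up front; if the optional add op
 * fails, the subscription is rolled back via v4l2_event_unsubscribe().
 */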
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		kfree(sev);
		return 0; /* Already listening */
	}

	if (sev->ops && sev->ops->add) {
		int ret = sev->ops->add(sev, elems);
		if (ret) {
			sev->ops = NULL;
			v4l2_event_unsubscribe(fh, sub);
			return ret;
		}
	}

	/* Mark as ready for use */
	sev->elems = elems;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
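/* Drop every subscription held by this file handle, one at a time. */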
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
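/*
 * Remove a single subscription (or all of them for V4L2_EVENT_ALL),
 * discarding any of its events that are still pending.
 */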
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	int i;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
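/* Sub-device wrapper around v4l2_event_unsubscribe(). */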
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
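/*
 * Replace/merge helpers for V4L2_EVENT_SOURCE_CHANGE: when an event is
 * dropped from a full queue its "changes" bits are OR'ed into the
 * surviving event so no source change is lost.
 */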
static void v4l2_event_src_replace(struct v4l2_event *old,
				const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};
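/* Subscribe to the source change event using the replace/merge ops above. */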
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);
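/* Sub-device wrapper around v4l2_src_change_event_subscribe(). */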
int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);