v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Media device request objects
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 * Copyright (C) 2018 Intel Corporation
 * Copyright (C) 2018 Google, Inc.
 *
 * Author: Hans Verkuil <hansverk@cisco.com>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>

#include <media/media-device.h>
#include <media/media-request.h>

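/*
 * Request state machine, as implemented below:
 *
 *   IDLE -> VALIDATING -> QUEUED -> COMPLETE
 *
 * A failed validation returns the request to IDLE. Reinit and the
 * final release go through CLEANING, and UPDATING is entered
 * transiently from IDLE while objects (e.g. controls) are set in
 * the request.
 */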
static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE]	 = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED]	 = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE]	 = "complete",
	[MEDIA_REQUEST_STATE_CLEANING]	 = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING]	 = "updating",
};

static const char *
media_request_state_str(enum media_request_state state)
{
	BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);

	if (WARN_ON(state >= ARRAY_SIZE(request_state)))
		return "invalid";
	return request_state[state];
}

static void media_request_clean(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/* Just a sanity check. No other code path is allowed to change this. */
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);

	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		media_request_object_unbind(obj);
		media_request_object_put(obj);
	}

	req->updating_count = 0;
	req->access_count = 0;
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	wake_up_interruptible_all(&req->poll_wait);
}

static void media_request_release(struct kref *kref)
{
	struct media_request *req =
		container_of(kref, struct media_request, kref);
	struct media_device *mdev = req->mdev;

	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

	/* No other users, no need for a spinlock */
	req->state = MEDIA_REQUEST_STATE_CLEANING;

	media_request_clean(req);

	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);
}

void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);

static int media_request_close(struct inode *inode, struct file *filp)
{
	struct media_request *req = filp->private_data;

	media_request_put(req);
	return 0;
}

static __poll_t media_request_poll(struct file *filp,
				   struct poll_table_struct *wait)
{
	struct media_request *req = filp->private_data;
	unsigned long flags;
	__poll_t ret = 0;

	if (!(poll_requested_events(wait) & EPOLLPRI))
		return 0;

	poll_wait(filp, &req->poll_wait, wait);
	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
		ret = EPOLLPRI;
		goto unlock;
	}
	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
		ret = EPOLLERR;
		goto unlock;
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}

static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets queued
	 * next by serialising the queueing process. This mutex is also used
	 * to serialize with canceling a vb2 queue and with setting values such
	 * as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	media_request_get(req);

	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If the req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. The reason we set the state first is that this
	 * allows req_queue to unbind or complete the queued objects in case
	 * they are immediately 'consumed'. State changes from QUEUED to another
	 * state can only happen if either the driver changes the state or if
	 * the user cancels the vb2 queue. The driver can only change the state
	 * after each object is queued through the req_queue op (and note that
	 * that op cannot fail), so setting the state to QUEUED up front is
	 * safe.
	 *
	 * The other reason for changing the state is if the vb2 queue is
	 * canceled, and that uses the req_queue_mutex which is still locked
	 * while req_queue is called, so that's safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		media_request_put(req);
	}

	return ret;
}

static long media_request_ioctl_reinit(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
		dev_dbg(mdev->dev,
			"request: %s not in idle or complete state, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	if (req->access_count) {
		dev_dbg(mdev->dev,
			"request: %s is being accessed, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	req->state = MEDIA_REQUEST_STATE_CLEANING;
	spin_unlock_irqrestore(&req->lock, flags);

	media_request_clean(req);

	spin_lock_irqsave(&req->lock, flags);
	req->state = MEDIA_REQUEST_STATE_IDLE;
	spin_unlock_irqrestore(&req->lock, flags);

	return 0;
}

static long media_request_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct media_request *req = filp->private_data;

	switch (cmd) {
	case MEDIA_REQUEST_IOC_QUEUE:
		return media_request_ioctl_queue(req);
	case MEDIA_REQUEST_IOC_REINIT:
		return media_request_ioctl_reinit(req);
	default:
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
	.release = media_request_close,
};

struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
	struct media_request *req;

	if (!mdev || !mdev->ops ||
	    !mdev->ops->req_validate || !mdev->ops->req_queue)
		return ERR_PTR(-EBADR);

	CLASS(fd, f)(request_fd);
	if (fd_empty(f))
		goto err;

	if (fd_file(f)->f_op != &request_fops)
		goto err;
	req = fd_file(f)->private_data;
	if (req->mdev != mdev)
		goto err;

	/*
	 * Note: as long as someone has an open filehandle of the request,
	 * the request can never be released. The CLASS(fd) above holds a
	 * file reference for the duration of this function, so even if
	 * userspace closes the request filehandle, the release() fop
	 * won't be called. Hence the media_request_get() always succeeds
	 * and there is no race condition where the request was released
	 * before media_request_get() is called.
	 */
	media_request_get(req);

	return req;

err:
	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);

int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	struct file *filp;
	int fd;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto err_free_req;
	}

	filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_put_fd;
	}

	filp->private_data = req;
	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	*alloc_fd = fd;

	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), fd);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	fd_install(fd, filp);

	return 0;

err_put_fd:
	put_unused_fd(fd);

err_free_req:
	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);

	return ret;
}

static void media_request_object_release(struct kref *kref)
{
	struct media_request_object *obj =
		container_of(kref, struct media_request_object, kref);
	struct media_request *req = obj->req;

	if (WARN_ON(req))
		media_request_object_unbind(obj);
	obj->ops->release(obj);
}

struct media_request_object *
media_request_object_find(struct media_request *req,
			  const struct media_request_object_ops *ops,
			  void *priv)
{
	struct media_request_object *obj;
	struct media_request_object *found = NULL;
	unsigned long flags;

	if (WARN_ON(!ops || !priv))
		return NULL;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list) {
		if (obj->ops == ops && obj->priv == priv) {
			media_request_object_get(obj);
			found = obj;
			break;
		}
	}
	spin_unlock_irqrestore(&req->lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);

void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);

void media_request_object_init(struct media_request_object *obj)
{
	obj->ops = NULL;
	obj->req = NULL;
	obj->priv = NULL;
	obj->completed = false;
	INIT_LIST_HEAD(&obj->list);
	kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);

int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	if (WARN_ON(!ops->release))
		return -EBADR;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING &&
		    req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;

	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);

void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);

void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	if (obj->completed)
		goto unlock;
	obj->completed = true;
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);
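
For orientation: media_request_alloc() above backs the MEDIA_IOC_REQUEST_ALLOC ioctl on the media device node, and the anonymous fd it installs accepts MEDIA_REQUEST_IOC_QUEUE, MEDIA_REQUEST_IOC_REINIT and poll(). Below is a minimal, hypothetical userspace sketch of that lifecycle; the /dev/media0 path and the omitted object setup are assumptions, not part of the file above.

/*
 * Hypothetical userspace sketch (not part of the kernel file above).
 * Walks one request through IDLE -> VALIDATING -> QUEUED -> COMPLETE
 * and back to IDLE. Assumes a request-capable driver at /dev/media0.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/media.h>

int main(void)
{
	struct pollfd pfd;
	int media_fd, req_fd;

	media_fd = open("/dev/media0", O_RDWR);
	if (media_fd < 0)
		return 1;

	/* Backed by media_request_alloc(): installs the anon inode fd. */
	if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd) < 0)
		return 1;

	/*
	 * A real application binds objects to the request here, e.g.
	 * VIDIOC_QBUF with V4L2_BUF_FLAG_REQUEST_FD set, or control
	 * writes with request_fd in struct v4l2_ext_controls. Queueing
	 * an empty request will normally fail validation.
	 */

	/* Drives media_request_ioctl_queue() above. */
	if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE) < 0)
		perror("MEDIA_REQUEST_IOC_QUEUE");

	/* media_request_poll() signals completion as an EPOLLPRI event. */
	pfd.fd = req_fd;
	pfd.events = POLLPRI;
	if (poll(&pfd, 1, 1000) == 1 && (pfd.revents & POLLPRI))
		printf("request completed\n");

	/* COMPLETE -> CLEANING -> IDLE, so the same fd can be reused. */
	ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);

	close(req_fd);	/* final put via media_request_close() */
	close(media_fd);
	return 0;
}

Reporting completion as POLLPRI keeps a request fd distinct from the POLLIN/POLLOUT buffer events of video nodes sharing the same poll set.
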
v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Media device request objects
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 * Copyright (C) 2018 Intel Corporation
 * Copyright (C) 2018 Google, Inc.
 *
 * Author: Hans Verkuil <hans.verkuil@cisco.com>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>

#include <media/media-device.h>
#include <media/media-request.h>

static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE]	 = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED]	 = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE]	 = "complete",
	[MEDIA_REQUEST_STATE_CLEANING]	 = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING]	 = "updating",
};

static const char *
media_request_state_str(enum media_request_state state)
{
	BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);

	if (WARN_ON(state >= ARRAY_SIZE(request_state)))
		return "invalid";
	return request_state[state];
}

static void media_request_clean(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/* Just a sanity check. No other code path is allowed to change this. */
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);

	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		media_request_object_unbind(obj);
		media_request_object_put(obj);
	}

	req->updating_count = 0;
	req->access_count = 0;
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	wake_up_interruptible_all(&req->poll_wait);
}

static void media_request_release(struct kref *kref)
{
	struct media_request *req =
		container_of(kref, struct media_request, kref);
	struct media_device *mdev = req->mdev;

	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

	/* No other users, no need for a spinlock */
	req->state = MEDIA_REQUEST_STATE_CLEANING;

	media_request_clean(req);

	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);
}

void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);

static int media_request_close(struct inode *inode, struct file *filp)
{
	struct media_request *req = filp->private_data;

	media_request_put(req);
	return 0;
}

static __poll_t media_request_poll(struct file *filp,
				   struct poll_table_struct *wait)
{
	struct media_request *req = filp->private_data;
	unsigned long flags;
	__poll_t ret = 0;

	if (!(poll_requested_events(wait) & EPOLLPRI))
		return 0;

	poll_wait(filp, &req->poll_wait, wait);
	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
		ret = EPOLLPRI;
		goto unlock;
	}
	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
		ret = EPOLLERR;
		goto unlock;
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}

static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets queued
	 * next by serialising the queueing process. This mutex is also used
	 * to serialize with canceling a vb2 queue and with setting values such
	 * as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	media_request_get(req);

	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If the req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. The reason we set the state first is that this
	 * allows req_queue to unbind or complete the queued objects in case
	 * they are immediately 'consumed'. State changes from QUEUED to another
	 * state can only happen if either the driver changes the state or if
	 * the user cancels the vb2 queue. The driver can only change the state
	 * after each object is queued through the req_queue op (and note that
	 * that op cannot fail), so setting the state to QUEUED up front is
	 * safe.
	 *
	 * The other reason for changing the state is if the vb2 queue is
	 * canceled, and that uses the req_queue_mutex which is still locked
	 * while req_queue is called, so that's safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		media_request_put(req);
	}

	return ret;
}

static long media_request_ioctl_reinit(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
		dev_dbg(mdev->dev,
			"request: %s not in idle or complete state, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	if (req->access_count) {
		dev_dbg(mdev->dev,
			"request: %s is being accessed, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	req->state = MEDIA_REQUEST_STATE_CLEANING;
	spin_unlock_irqrestore(&req->lock, flags);

	media_request_clean(req);

	spin_lock_irqsave(&req->lock, flags);
	req->state = MEDIA_REQUEST_STATE_IDLE;
	spin_unlock_irqrestore(&req->lock, flags);

	return 0;
}

static long media_request_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct media_request *req = filp->private_data;

	switch (cmd) {
	case MEDIA_REQUEST_IOC_QUEUE:
		return media_request_ioctl_queue(req);
	case MEDIA_REQUEST_IOC_REINIT:
		return media_request_ioctl_reinit(req);
	default:
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
	.release = media_request_close,
};

struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
	struct fd f;
	struct media_request *req;

	if (!mdev || !mdev->ops ||
	    !mdev->ops->req_validate || !mdev->ops->req_queue)
		return ERR_PTR(-EBADR);

	f = fdget(request_fd);
	if (!f.file)
		goto err_no_req_fd;

	if (f.file->f_op != &request_fops)
		goto err_fput;
	req = f.file->private_data;
	if (req->mdev != mdev)
		goto err_fput;

	/*
	 * Note: as long as someone has an open filehandle of the request,
	 * the request can never be released. The fdget() above ensures that
	 * even if userspace closes the request filehandle, the release()
	 * fop won't be called, so the media_request_get() always succeeds
	 * and there is no race condition where the request was released
	 * before media_request_get() is called.
	 */
	media_request_get(req);
	fdput(f);

	return req;

err_fput:
	fdput(f);

err_no_req_fd:
	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);

int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	struct file *filp;
	int fd;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_put_fd;
	}

	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_fput;
	}

	filp->private_data = req;
	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	*alloc_fd = fd;

	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), fd);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	fd_install(fd, filp);

	return 0;

err_fput:
	fput(filp);

err_put_fd:
	put_unused_fd(fd);

	return ret;
}

static void media_request_object_release(struct kref *kref)
{
	struct media_request_object *obj =
		container_of(kref, struct media_request_object, kref);
	struct media_request *req = obj->req;

	if (WARN_ON(req))
		media_request_object_unbind(obj);
	obj->ops->release(obj);
}

struct media_request_object *
media_request_object_find(struct media_request *req,
			  const struct media_request_object_ops *ops,
			  void *priv)
{
	struct media_request_object *obj;
	struct media_request_object *found = NULL;
	unsigned long flags;

	if (WARN_ON(!ops || !priv))
		return NULL;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list) {
		if (obj->ops == ops && obj->priv == priv) {
			media_request_object_get(obj);
			found = obj;
			break;
		}
	}
	spin_unlock_irqrestore(&req->lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);

void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);

void media_request_object_init(struct media_request_object *obj)
{
	obj->ops = NULL;
	obj->req = NULL;
	obj->priv = NULL;
	obj->completed = false;
	INIT_LIST_HEAD(&obj->list);
	kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);

int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	if (WARN_ON(!ops->release))
		return -EBADR;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;

	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);

void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);

void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	if (obj->completed)
		goto unlock;
	obj->completed = true;
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);
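
On the driver side, the req_validate and req_queue ops that media_request_get_by_fd() insists on are, in practice, usually the stock videobuf2 helpers. A minimal sketch, assuming a vb2-based driver (the my_media_ops name is illustrative, not taken from either version above):

/*
 * Hypothetical driver-side wiring for the request API, assuming a
 * vb2-based driver. vb2_request_validate() and vb2_request_queue()
 * are the generic videobuf2 helpers for these ops.
 */
#include <media/media-device.h>
#include <media/videobuf2-core.h>

static const struct media_device_ops my_media_ops = {
	/* Runs under req_queue_mutex while the request is VALIDATING. */
	.req_validate = vb2_request_validate,
	/*
	 * Called right after the state is set to QUEUED; this op must
	 * not fail (see the comment in media_request_ioctl_queue()).
	 */
	.req_queue = vb2_request_queue,
	/* .req_alloc/.req_free are optional, but must come as a pair. */
};

A driver points mdev->ops at such a struct before registering the media device; without req_validate and req_queue, both MEDIA_IOC_REQUEST_ALLOC and media_request_get_by_fd() reject requests.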