include/media/v4l2-mem2mem.h (Linux v4.6)
/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf2-v4l2.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run:	required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to end before this callback returns
 *		(and it will be the usual case). When the job finishes,
 *		v4l2_m2m_job_finish() has to be called.
 * @job_ready:	optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish a
 *		transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method may not sleep.
 * @job_abort:	required. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have been finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() (as if the transaction ended normally).
 *		This function does not have to (and will usually not) wait
 *		until the device enters a state when it can be stopped.
 * @lock:	optional. Define a driver's own lock callback, instead of using
 *		m2m_ctx->q_lock.
 * @unlock:	optional. Define a driver's own unlock callback, instead of
 *		using m2m_ctx->q_lock.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);
	int (*job_ready)(void *priv);
	void (*job_abort)(void *priv);
	void (*lock)(void *priv);
	void (*unlock)(void *priv);
};

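/*
 * Illustrative sketch, not part of this header: one way a hypothetical
 * driver could fill in the two required callbacks. The names my_ctx,
 * my_dev, my_hw_start() and my_hw_stop() are invented for the example
 * (and are reused by the other sketches below); the helpers
 * v4l2_m2m_next_src_buf() and v4l2_m2m_next_dst_buf() are declared
 * further down in this header.
 */
struct my_ctx {
	struct v4l2_fh		fh;	/* file->private_data points here */
	struct my_dev		*dev;
	struct v4l2_m2m_ctx	*m2m_ctx;
};

static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src, *dst;

	/* Peek at the next ready buffers without removing them yet. */
	src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);

	/* Kick the hardware; the job finishes later, e.g. in the IRQ path. */
	my_hw_start(ctx->dev, src, dst);
}

static void my_job_abort(void *priv)
{
	struct my_ctx *ctx = priv;

	/*
	 * Ask the hardware to stop as soon as it safely can;
	 * v4l2_m2m_job_finish() is still called from the completion path.
	 */
	my_hw_stop(ctx->dev);
}

static const struct v4l2_m2m_ops my_m2m_ops = {
	.device_run	= my_device_run,
	.job_abort	= my_job_abort,
	/* .job_ready omitted: one src + one dst buffer is then considered
	 * enough for a transaction. */
};
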
struct v4l2_m2m_dev;

struct v4l2_m2m_queue_ctx {
/* private: internal use only */
	struct vb2_queue	q;

	/* Queue for buffers ready to be processed as soon as this
	 * instance receives access to the device */
	struct list_head	rdy_queue;
	spinlock_t		rdy_spinlock;
	u8			num_rdy;
	bool			buffered;
};

struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex			*q_lock;

/* private: internal use only */
	struct v4l2_m2m_dev		*m2m_dev;

	/* Capture (output to memory) queue context */
	struct v4l2_m2m_queue_ctx	cap_q_ctx;

	/* Output (input from memory) queue context */
	struct v4l2_m2m_queue_ctx	out_q_ctx;

	/* For device job queue */
	struct list_head		queue;
	unsigned long			job_flags;
	wait_queue_head_t		finished;

	/* Instance private data */
	void				*priv;
};

struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};

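/*
 * Illustrative sketch, not part of this header: a driver that needs
 * per-buffer state usually embeds struct v4l2_m2m_buffer as the first
 * member of its own buffer structure and sets buf_struct_size to
 * sizeof(struct my_buf) when initializing the vb2 queues. struct my_buf
 * and to_my_buf() are hypothetical names.
 */
struct my_buf {
	struct v4l2_m2m_buffer	m2m_buf;	/* must stay first */
	u32			flags;		/* driver-private state */
};

static inline struct my_buf *to_my_buf(struct vb2_v4l2_buffer *vbuf)
{
	return container_of(vbuf, struct my_buf, m2m_buf.vb);
}
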
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				       enum v4l2_buf_type type);

void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);

static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}

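/*
 * Illustrative sketch, not part of this header: a typical job-completion
 * path (for instance the interrupt handler) removes the buffers the job
 * used, marks them done and then reports the end of the transaction so
 * the next queued job can be scheduled. my_dev and my_ctx are the
 * hypothetical types from the sketch above; the *_buf_remove() helpers
 * used here are declared further down in this header.
 */
static void my_job_done(struct my_dev *dev, struct my_ctx *ctx,
			enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	v4l2_m2m_buf_done(src, state);
	v4l2_m2m_buf_done(dst, state);

	/* Must be called exactly once per job started via device_run(). */
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
}
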
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf);
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_exportbuffer *eb);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type);
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type);

unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma);

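/*
 * Illustrative sketch, not part of this header: with the per-context
 * helpers above, a driver that keeps its m2m context in its own file
 * handle just forwards the calls. The v4l2_m2m_ioctl_* and
 * v4l2_m2m_fop_* helpers near the end of this header remove even this
 * boilerplate for drivers that store the context in struct v4l2_fh.
 * my_vidioc_qbuf() and my_poll() are hypothetical.
 */
static int my_vidioc_qbuf(struct file *file, void *priv,
			  struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;
	struct my_ctx *ctx = container_of(fh, struct my_ctx, fh);

	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}

static unsigned int my_poll(struct file *file, struct poll_table_struct *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct my_ctx *ctx = container_of(fh, struct my_ctx, fh);

	return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
}
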
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));

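/*
 * Illustrative sketch, not part of this header: v4l2_m2m_init() is
 * usually called once at probe time (dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops))
 * and v4l2_m2m_ctx_init() once per open(), e.g.
 * ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init),
 * checking the result with IS_ERR()/PTR_ERR(). The queue_init callback
 * below shows a plausible minimal setup of both vb2 queues; my_vb2_ops,
 * the dma-contig memory backend and ctx->dev->mutex are assumptions of
 * this hypothetical driver.
 */
static int my_queue_init(void *priv, struct vb2_queue *src_vq,
			 struct vb2_queue *dst_vq)
{
	struct my_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->ops = &my_vb2_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->dev->mutex;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->ops = &my_vb2_ops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->dev->mutex;

	return vb2_queue_init(dst_vq);
}
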
static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}

static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf);

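/*
 * Illustrative sketch, not part of this header: the driver's vb2
 * .buf_queue operation normally just hands the buffer over to the m2m
 * framework, which puts it on the ready queue of this context.
 * my_vb2_buf_queue() is hypothetical.
 */
static void my_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
}
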
/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
 * use
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->out_q_ctx.num_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->cap_q_ctx.num_rdy;
}

void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->cap_q_ctx.q;
}

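/*
 * Illustrative sketch, not part of this header: a common use of the
 * queue accessors is refusing a format change while buffers are still
 * allocated on the affected queue, e.g. from a hypothetical s_fmt
 * handler.
 */
static int my_check_fmt_allowed(struct my_ctx *ctx, struct v4l2_format *f)
{
	struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);

	return vb2_is_busy(vq) ? -EBUSY : 0;
}
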
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: pointer to struct v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}

/* v4l2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
				struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
				struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
			       struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
				enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
				enum v4l2_buf_type type);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait);

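/*
 * Illustrative sketch, not part of this header: the v4l2_m2m_ioctl_* and
 * v4l2_m2m_fop_* helpers above expect file->private_data to be a
 * struct v4l2_fh whose m2m_ctx field has been set at open() time. With
 * that in place, the ioctl and file-operation tables of a hypothetical
 * driver shrink to mostly helper references (my_open/my_release and the
 * format handlers remain driver specific).
 */
static const struct v4l2_ioctl_ops my_ioctl_ops = {
	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
	.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
	.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
};

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= my_open,
	.release	= my_release,
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};
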
#endif /* _MEDIA_V4L2_MEM2MEM_H */