/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf2-v4l2.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run:	required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to end before this callback returns
 *		(and this is the usual case). When the job finishes,
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
 *		has to be called.
 * @job_ready:	optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish a
 *		transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method may not sleep.
 * @job_abort:	optional. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have been finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
 *		if the transaction ended normally.
 *		This function does not have to (and will usually not) wait
 *		until the device enters a state when it can be stopped.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);
	int (*job_ready)(void *priv);
	void (*job_abort)(void *priv);
};
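
/*
 * Example (editor's sketch, not part of the original header): a minimal set
 * of mem-to-mem callbacks. struct my_ctx, my_hw_start() and my_hw_stop() are
 * hypothetical driver primitives; ctx->fh is assumed to be an embedded
 * struct v4l2_fh, whose m2m_ctx field is set up in open().
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *		// Kick the hardware and return; the job completes in the
 *		// interrupt handler, which calls v4l2_m2m_job_finish().
 *		my_hw_start(ctx, src, dst);
 *	}
 *
 *	static void my_job_abort(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		// Stop as soon as safely possible; the completion path
 *		// still calls v4l2_m2m_job_finish() as usual.
 *		my_hw_stop(ctx);
 *	}
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_abort	= my_job_abort,
 *	};
 */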

struct video_device;
struct v4l2_m2m_dev;

/**
 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
 *	processed
 *
 * @q:		embedded struct &vb2_queue
 * @rdy_queue:	list of buffers ready to be processed
 * @rdy_spinlock: spin lock to protect the struct usage
 * @num_rdy:	number of buffers ready to be processed
 * @buffered:	is the queue buffered?
 *
 * Queue for buffers ready to be processed as soon as this
 * instance receives access to the device.
 */
struct v4l2_m2m_queue_ctx {
	struct vb2_queue	q;

	struct list_head	rdy_queue;
	spinlock_t		rdy_spinlock;
	u8			num_rdy;
	bool			buffered;
};

/**
 * struct v4l2_m2m_ctx - Memory to memory context structure
 *
 * @q_lock: optional struct &mutex lock serializing the cap/out vb2 queues
 * @new_frame: valid in the device_run callback: if true, then this
 *		starts a new frame; if false, then this is a new slice
 *		for an existing frame. This is always true unless
 *		V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
 *		indicates slicing support.
 * @is_draining: indicates device is in draining phase
 * @last_src_buf: indicate the last source buffer for draining
 * @next_buf_last: next queued capture buffer will be tagged as last
 * @has_stopped: indicate the device has been stopped
 * @ignore_cap_streaming: If true, job_ready can be called even if the CAPTURE
 *			  queue is not streaming. This allows firmware to
 *			  analyze the bitstream header which arrives on the
 *			  OUTPUT queue. The driver must implement the job_ready
 *			  callback correctly to make sure that the requirements
 *			  for actual decoding are met.
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @cap_q_ctx: Capture (output to memory) queue context
 * @out_q_ctx: Output (input from memory) queue context
 * @queue: List of memory to memory contexts
 * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
 *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
 * @finished: Wait queue used to signal when a job finishes.
 * @priv: Instance private data
 *
 * The memory to memory context is specific to a file handle, NOT to e.g.
 * a device.
 */
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex			*q_lock;

	bool				new_frame;

	bool				is_draining;
	struct vb2_v4l2_buffer		*last_src_buf;
	bool				next_buf_last;
	bool				has_stopped;
	bool				ignore_cap_streaming;

	/* internal use only */
	struct v4l2_m2m_dev		*m2m_dev;

	struct v4l2_m2m_queue_ctx	cap_q_ctx;

	struct v4l2_m2m_queue_ctx	out_q_ctx;

	/* For device job queue */
	struct list_head		queue;
	unsigned long			job_flags;
	wait_queue_head_t		finished;

	void				*priv;
};

/**
 * struct v4l2_m2m_buffer - Memory to memory buffer
 *
 * @vb: embedded struct &vb2_v4l2_buffer
 * @list: list of m2m buffers
 */
struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				       enum v4l2_buf_type type);

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case, the
 * driver should supply a custom callback (job_ready in v4l2_m2m_ops) that
 * returns 1 if the instance is ready, as in the sketch below.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);

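/*
 * Example (editor's sketch): a custom job_ready callback for a hypothetical
 * device that needs two source buffers per transaction; struct my_ctx is an
 * assumed driver context with an embedded struct v4l2_fh. Note that the
 * callback must not sleep.
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2 &&
 *		       v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) >= 1;
 *	}
 */
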
/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the &v4l2_m2m_ops->device_run
 * callback has been called on the driver. To prevent recursion, however, it
 * should not be called directly from the &v4l2_m2m_ops->device_run callback.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);

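/*
 * Example (editor's sketch): a typical completion path in a driver's
 * interrupt handler. struct my_dev and its m2m_dev field are hypothetical;
 * the buffers are returned first, then the device is yielded back so the
 * framework can schedule the next job.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */
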
/**
 * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
 * state and inform the framework that a job has been finished and have it
 * clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
 *
 * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
 * function instead of job_finish() to take held buffers into account. It is
 * optional for other drivers.
 *
 * This function removes the source buffer from the ready list and returns
 * it with the given state. The same is done for the destination buffer, unless
 * it is marked 'held'. In that case the buffer is kept on the ready list.
 *
 * After that the job is finished (see job_finish()).
 *
 * This allows for multiple output buffers to be used to fill in a single
 * capture buffer. This is typically used by stateless decoders where
 * multiple e.g. H.264 slices contribute to a single decoded frame.
 */
void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state);

/**
 * v4l2_m2m_buf_done() - return a buffer to userspace with the given state
 *
 * @buf: pointer to struct &vb2_v4l2_buffer
 * @state: state to return the buffer with, as defined by &enum vb2_buffer_state
 */
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}

/**
 * v4l2_m2m_clear_state() - clear encoding/decoding state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = false;
}

/**
 * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = true;
}

/**
 * v4l2_m2m_dst_buf_is_last() - return true if the next queued capture buffer
 * will be the last one in the current draining encoding/decoding session
 *
 * That last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to signal
 * the end of the capture session.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->is_draining && m2m_ctx->next_buf_last;
}

/**
 * v4l2_m2m_has_stopped() - return the current encoding/decoding session
 * stopped state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->has_stopped;
}

/**
 * v4l2_m2m_is_last_draining_src_buf() - return true if the given output
 * buffer is the last one to drain in the current encoding/decoding session
 *
 * This identifies the last output buffer that was queued before a session
 * stop was requested; once that buffer has been processed, the
 * encoding/decoding session actually enters the stopped state.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
static inline bool
v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
				  struct vb2_v4l2_buffer *vbuf)
{
	return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf;
}

/**
 * v4l2_m2m_last_buffer_done() - mark the buffer with the LAST flag and return
 * it as DONE
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf);

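/*
 * Example (editor's sketch): handling a drain request in the completion path.
 * If the source buffer just consumed was the last one queued before a stop
 * command, the capture buffer is returned with V4L2_BUF_FLAG_LAST and the
 * session is marked stopped. ctx, src and dst are as in the earlier
 * interrupt-handler sketch.
 *
 *	if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src)) {
 *		v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, dst);
 *	} else {
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *	}
 */
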
/**
 * v4l2_m2m_suspend() - stop new jobs from being run and wait for the current
 * job to finish
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in its suspend hook. Stops new jobs from being run, and
 * waits for the currently running job to finish.
 */
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_resume() - resume job running and try to run a queued job
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in its resume hook. This reverts the operation of
 * v4l2_m2m_suspend() and allows jobs to be run again. It also tries to run a
 * queued job if there is one.
 */
void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);

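/*
 * Example (editor's sketch): wiring the helpers into system PM callbacks.
 * my_dev_to_m2m_dev() is a hypothetical accessor from the struct device to
 * the driver's v4l2_m2m_dev.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		v4l2_m2m_suspend(my_dev_to_m2m_dev(dev));
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		v4l2_m2m_resume(my_dev_to_m2m_dev(dev));
 *		return 0;
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);
 */
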
/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @reqbufs: pointer to struct &v4l2_requestbuffers
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @create: pointer to struct &v4l2_create_buffers
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @eb: pointer to struct &v4l2_exportbuffer
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_exportbuffer *eb);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type);

/**
 * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding
 * session state when a start of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: queue
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q);

/**
 * v4l2_m2m_update_stop_streaming_state() - update the encoding/decoding
 * session state when a stop of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: queue
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q);

/**
 * v4l2_m2m_encoder_cmd() - execute an encoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @ec: pointer to the encoder command
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec);

/**
 * v4l2_m2m_decoder_cmd() - execute a decoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @dc: pointer to the decoder command
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @wait: pointer to struct &poll_table_struct
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this indicates
 * that a non-blocking write can be performed; for the destination queue, a
 * non-blocking read is indicated instead.
 */
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vma: pointer to struct &vm_area_struct
 *
 * Call from the driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for the video buffer, which will receive normal per-queue offsets
 * and proper vb2 queue pointers. The differentiation is made outside
 * vb2 by adding a predefined offset to buffers from one of the queues
 * and subtracting it before passing it back to vb2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma);

#ifndef CONFIG_MMU
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags);
#endif
/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * @m2m_ops: pointer to struct v4l2_m2m_ops
 *
 * Usually called from driver's ``probe()`` function.
 *
 * Return: returns an opaque pointer to the internal data to handle M2M context
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);

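/*
 * Example (editor's sketch): typical ``probe()``-time setup, using the
 * hypothetical my_m2m_ops from the sketch near the top of this header.
 * v4l2_m2m_init() returns an ERR_PTR() value on failure.
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */
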
#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
			struct video_device *vdev, int function);
#else
static inline void
v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
}

static inline int
v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	return 0;
}
#endif

/**
 * v4l2_m2m_release() - cleans up and frees an m2m_dev structure
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Usually called from driver's ``remove()`` function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize an m2m context
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function
 *	to be used for initializing vb2_queues
 *
 * Usually called from driver's ``open()`` function, as in the sketch below.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));

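/*
 * Example (editor's sketch): allocating the per-file-handle context from
 * ``open()``. my_queue_init(), my_qops and struct my_ctx are hypothetical;
 * note that an m2m driver must set buf_struct_size to
 * sizeof(struct v4l2_m2m_buffer) (or to a structure embedding it).
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		struct my_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = ctx;
 *		src_vq->ops = &my_qops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		// ...mirror the remaining settings for the capture queue...
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	// In open(), after allocating ctx and initializing ctx->fh:
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */
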
/*
 * Mark the output (source) or capture (destination) queue as buffered, so
 * that jobs may be scheduled on it even without queued v4l2 buffers (see
 * v4l2_m2m_try_schedule()).
 */
static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}

static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 *
 * Call this from the &vb2_ops->buf_queue callback, as in the sketch below.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf);

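/*
 * Example (editor's sketch): a buf_queue callback usually just hands the
 * buffer over to the framework. to_vb2_v4l2_buffer() and vb2_get_drv_priv()
 * are the standard vb2 accessors; struct my_ctx is hypothetical.
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */
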
/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready
 * for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned int num_buf_rdy;
	unsigned long flags;

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	return num_buf_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned int num_buf_rdy;
	unsigned long flags;

	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
	num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);

	return num_buf_rdy;
}

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_last_src_buf() - return last source buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_src_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->cap_q_ctx.q;
}

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}

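/*
 * Example (editor's sketch): a stop_streaming handler commonly drains the
 * ready list with these helpers, returning every buffer with the ERROR
 * state. The capture side is shown; the output side uses
 * v4l2_m2m_src_buf_remove() instead.
 *
 *	struct vb2_v4l2_buffer *vbuf;
 *
 *	while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
 *		v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
 */
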
/**
 * v4l2_m2m_buf_remove_by_buf() - take a specific buffer off the list of ready
 * buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @vbuf: the buffer to be removed
 */
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_src_buf_remove_by_buf() - take a specific source buffer off the
 * list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
}

/**
 * v4l2_m2m_dst_buf_remove_by_buf() - take a specific destination buffer off
 * the list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
}

/*
 * Remove the buffer with the given index from the ready list of the queue
 * context (or of the source/destination queue respectively) and return it,
 * or NULL if no such buffer is on the list.
 */
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);

static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
}

static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}

/**
 * v4l2_m2m_buf_copy_metadata() - copy buffer metadata from
 * the output buffer to the capture buffer
 *
 * @out_vb: the output buffer that is the source of the metadata.
 * @cap_vb: the capture buffer that will receive the metadata.
 * @copy_frame_flags: copy the KEY/B/PFRAME flags as well.
 *
 * This helper function copies the timestamp, timecode (if the TIMECODE
 * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME
 * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb.
 *
 * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME
 * flags are not copied. This is typically needed for encoders that
 * set these bits explicitly.
 */
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags);

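/*
 * Example (editor's sketch): in the completion path, the metadata is copied
 * before the buffers are returned, so that the capture buffer carries the
 * timestamp of the output buffer it was produced from (src and dst as in the
 * earlier interrupt-handler sketch).
 *
 *	v4l2_m2m_buf_copy_metadata(src, dst, true);
 *	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 */
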
/* v4l2 request helper */

void v4l2_m2m_request_queue(struct media_request *req);

/* v4l2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
				struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv,
			       struct v4l2_remove_buffers *d);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
				struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
			       struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
				enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
				enum v4l2_buf_type type);
int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh,
			       struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh,
			       struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);

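/*
 * Example (editor's sketch): the ioctl helpers above are designed to be
 * plugged straight into a driver's v4l2_ioctl_ops and v4l2_file_operations
 * when the driver embeds a struct v4l2_fh and uses fh->m2m_ctx. Only a
 * representative subset is shown; my_open and my_release are hypothetical.
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *		// ...format handling and other driver-specific ioctls...
 *	};
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */
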
#endif /* _MEDIA_V4L2_MEM2MEM_H */
v6.2
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * Memory-to-memory device framework for Video for Linux 2.
  4 *
  5 * Helper functions for devices that use memory buffers for both source
  6 * and destination.
  7 *
  8 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
  9 * Pawel Osciak, <pawel@osciak.com>
 10 * Marek Szyprowski, <m.szyprowski@samsung.com>
 11 */
 12
 13#ifndef _MEDIA_V4L2_MEM2MEM_H
 14#define _MEDIA_V4L2_MEM2MEM_H
 15
 16#include <media/videobuf2-v4l2.h>
 17
 18/**
 19 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 20 * @device_run:	required. Begin the actual job (transaction) inside this
 21 *		callback.
 22 *		The job does NOT have to end before this callback returns
 23 *		(and it will be the usual case). When the job finishes,
 24 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
 25 *		has to be called.
 26 * @job_ready:	optional. Should return 0 if the driver does not have a job
 27 *		fully prepared to run yet (i.e. it will not be able to finish a
 28 *		transaction without sleeping). If not provided, it will be
 29 *		assumed that one source and one destination buffer are all
 30 *		that is required for the driver to perform one full transaction.
 31 *		This method may not sleep.
 32 * @job_abort:	optional. Informs the driver that it has to abort the currently
 33 *		running transaction as soon as possible (i.e. as soon as it can
 34 *		stop the device safely; e.g. in the next interrupt handler),
 35 *		even if the transaction would not have been finished by then.
 36 *		After the driver performs the necessary steps, it has to call
 37 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
 38 *		if the transaction ended normally.
 39 *		This function does not have to (and will usually not) wait
 40 *		until the device enters a state when it can be stopped.
 41 */
 42struct v4l2_m2m_ops {
 43	void (*device_run)(void *priv);
 44	int (*job_ready)(void *priv);
 45	void (*job_abort)(void *priv);
 46};
 47
 48struct video_device;
 49struct v4l2_m2m_dev;
 50
 51/**
 52 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
 53 *	processed
 54 *
 55 * @q:		pointer to struct &vb2_queue
 56 * @rdy_queue:	List of V4L2 mem-to-mem queues
 57 * @rdy_spinlock: spin lock to protect the struct usage
 58 * @num_rdy:	number of buffers ready to be processed
 59 * @buffered:	is the queue buffered?
 60 *
 61 * Queue for buffers ready to be processed as soon as this
 62 * instance receives access to the device.
 63 */
 64
 65struct v4l2_m2m_queue_ctx {
 66	struct vb2_queue	q;
 67
 68	struct list_head	rdy_queue;
 69	spinlock_t		rdy_spinlock;
 70	u8			num_rdy;
 71	bool			buffered;
 72};
 73
 74/**
 75 * struct v4l2_m2m_ctx - Memory to memory context structure
 76 *
 77 * @q_lock: struct &mutex lock
 78 * @new_frame: valid in the device_run callback: if true, then this
 79 *		starts a new frame; if false, then this is a new slice
 80 *		for an existing frame. This is always true unless
 81 *		V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
 82 *		indicates slicing support.
 83 * @is_draining: indicates device is in draining phase
 84 * @last_src_buf: indicate the last source buffer for draining
 85 * @next_buf_last: next capture queud buffer will be tagged as last
 86 * @has_stopped: indicate the device has been stopped
 
 
 
 
 
 
 87 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 88 * @cap_q_ctx: Capture (output to memory) queue context
 89 * @out_q_ctx: Output (input from memory) queue context
 90 * @queue: List of memory to memory contexts
 91 * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
 92 *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
 93 * @finished: Wait queue used to signalize when a job queue finished.
 94 * @priv: Instance private data
 95 *
 96 * The memory to memory context is specific to a file handle, NOT to e.g.
 97 * a device.
 98 */
 99struct v4l2_m2m_ctx {
100	/* optional cap/out vb2 queues lock */
101	struct mutex			*q_lock;
102
103	bool				new_frame;
104
105	bool				is_draining;
106	struct vb2_v4l2_buffer		*last_src_buf;
107	bool				next_buf_last;
108	bool				has_stopped;
 
109
110	/* internal use only */
111	struct v4l2_m2m_dev		*m2m_dev;
112
113	struct v4l2_m2m_queue_ctx	cap_q_ctx;
114
115	struct v4l2_m2m_queue_ctx	out_q_ctx;
116
117	/* For device job queue */
118	struct list_head		queue;
119	unsigned long			job_flags;
120	wait_queue_head_t		finished;
121
122	void				*priv;
123};
124
125/**
126 * struct v4l2_m2m_buffer - Memory to memory buffer
127 *
128 * @vb: pointer to struct &vb2_v4l2_buffer
129 * @list: list of m2m buffers
130 */
131struct v4l2_m2m_buffer {
132	struct vb2_v4l2_buffer	vb;
133	struct list_head	list;
134};
135
136/**
137 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
138 * running instance or NULL if no instance is running
139 *
140 * @m2m_dev: opaque pointer to the internal data to handle M2M context
141 */
142void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);
143
144/**
145 * v4l2_m2m_get_vq() - return vb2_queue for the given type
146 *
147 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
148 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
149 */
150struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
151				       enum v4l2_buf_type type);
152
153/**
154 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
155 * the pending job queue and add it if so.
156 *
157 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
158 *
159 * There are three basic requirements an instance has to meet to be able to run:
160 * 1) at least one source buffer has to be queued,
161 * 2) at least one destination buffer has to be queued,
162 * 3) streaming has to be on.
163 *
164 * If a queue is buffered (for example a decoder hardware ringbuffer that has
165 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
166 * on that queue.
167 *
168 * There may also be additional, custom requirements. In such case the driver
169 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
170 * return 1 if the instance is ready.
171 * An example of the above could be an instance that requires more than one
172 * src/dst buffer per transaction.
173 */
174void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
175
176/**
177 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
178 * and have it clean up
179 *
180 * @m2m_dev: opaque pointer to the internal data to handle M2M context
181 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
182 *
183 * Called by a driver to yield back the device after it has finished with it.
184 * Should be called as soon as possible after reaching a state which allows
185 * other instances to take control of the device.
186 *
187 * This function has to be called only after &v4l2_m2m_ops->device_run
188 * callback has been called on the driver. To prevent recursion, it should
189 * not be called directly from the &v4l2_m2m_ops->device_run callback though.
190 */
191void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
192			 struct v4l2_m2m_ctx *m2m_ctx);
193
194/**
195 * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
196 * state and inform the framework that a job has been finished and have it
197 * clean up
198 *
199 * @m2m_dev: opaque pointer to the internal data to handle M2M context
200 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
201 * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
202 *
203 * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
204 * function instead of job_finish() to take held buffers into account. It is
205 * optional for other drivers.
206 *
207 * This function removes the source buffer from the ready list and returns
208 * it with the given state. The same is done for the destination buffer, unless
209 * it is marked 'held'. In that case the buffer is kept on the ready list.
210 *
211 * After that the job is finished (see job_finish()).
212 *
213 * This allows for multiple output buffers to be used to fill in a single
214 * capture buffer. This is typically used by stateless decoders where
215 * multiple e.g. H.264 slices contribute to a single decoded frame.
216 */
217void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
218				      struct v4l2_m2m_ctx *m2m_ctx,
219				      enum vb2_buffer_state state);
220
221static inline void
222v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
223{
224	vb2_buffer_done(&buf->vb2_buf, state);
225}
226
227/**
228 * v4l2_m2m_clear_state() - clear encoding/decoding state
229 *
230 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
231 */
232static inline void
233v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx)
234{
235	m2m_ctx->next_buf_last = false;
236	m2m_ctx->is_draining = false;
237	m2m_ctx->has_stopped = false;
238}
239
240/**
241 * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped
242 *
243 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
244 */
245static inline void
246v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx)
247{
248	m2m_ctx->next_buf_last = false;
249	m2m_ctx->is_draining = false;
250	m2m_ctx->has_stopped = true;
251}
252
253/**
254 * v4l2_m2m_dst_buf_is_last() - return the current encoding/decoding session
255 * draining management state of next queued capture buffer
256 *
257 * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify
258 * the end of the capture session.
259 *
260 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
261 */
262static inline bool
263v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx)
264{
265	return m2m_ctx->is_draining && m2m_ctx->next_buf_last;
266}
267
268/**
269 * v4l2_m2m_has_stopped() - return the current encoding/decoding session
270 * stopped state
271 *
272 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
273 */
274static inline bool
275v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx)
276{
277	return m2m_ctx->has_stopped;
278}
279
280/**
281 * v4l2_m2m_is_last_draining_src_buf() - return the output buffer draining
282 * state in the current encoding/decoding session
283 *
284 * This will identify the last output buffer queued before a session stop
285 * was required, leading to an actual encoding/decoding session stop state
286 * in the encoding/decoding process after being processed.
287 *
288 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
289 * @vbuf: pointer to struct &v4l2_buffer
290 */
291static inline bool
292v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
293				  struct vb2_v4l2_buffer *vbuf)
294{
295	return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf;
296}
297
298/**
299 * v4l2_m2m_last_buffer_done() - marks the buffer with LAST flag and DONE
300 *
301 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
302 * @vbuf: pointer to struct &v4l2_buffer
303 */
304void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
305			       struct vb2_v4l2_buffer *vbuf);
306
307/**
308 * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job
309 * to finish
310 *
311 * @m2m_dev: opaque pointer to the internal data to handle M2M context
312 *
313 * Called by a driver in the suspend hook. Stop new jobs from being run, and
314 * wait for current running job to finish.
315 */
316void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);
317
318/**
319 * v4l2_m2m_resume() - resume job running and try to run a queued job
320 *
321 * @m2m_dev: opaque pointer to the internal data to handle M2M context
322 *
323 * Called by a driver in the resume hook. This reverts the operation of
324 * v4l2_m2m_suspend() and allows job to be run. Also try to run a queued job if
325 * there is any.
326 */
327void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);
328
329/**
330 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
331 *
332 * @file: pointer to struct &file
333 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
334 * @reqbufs: pointer to struct &v4l2_requestbuffers
335 */
336int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
337		     struct v4l2_requestbuffers *reqbufs);
338
339/**
340 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
341 *
342 * @file: pointer to struct &file
343 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
344 * @buf: pointer to struct &v4l2_buffer
345 *
346 * See v4l2_m2m_mmap() documentation for details.
347 */
348int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
349		      struct v4l2_buffer *buf);
350
351/**
352 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
353 * the type
354 *
355 * @file: pointer to struct &file
356 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
357 * @buf: pointer to struct &v4l2_buffer
358 */
359int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
360		  struct v4l2_buffer *buf);
361
362/**
363 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
364 * the type
365 *
366 * @file: pointer to struct &file
367 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
368 * @buf: pointer to struct &v4l2_buffer
369 */
370int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
371		   struct v4l2_buffer *buf);
372
373/**
374 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
375 * the type
376 *
377 * @file: pointer to struct &file
378 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
379 * @buf: pointer to struct &v4l2_buffer
380 */
381int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
382			 struct v4l2_buffer *buf);
383
384/**
385 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
386 * on the type
387 *
388 * @file: pointer to struct &file
389 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
390 * @create: pointer to struct &v4l2_create_buffers
391 */
392int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
393			 struct v4l2_create_buffers *create);
394
395/**
396 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
397 * the type
398 *
399 * @file: pointer to struct &file
400 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
401 * @eb: pointer to struct &v4l2_exportbuffer
402 */
403int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
404		   struct v4l2_exportbuffer *eb);
405
406/**
407 * v4l2_m2m_streamon() - turn on streaming for a video queue
408 *
409 * @file: pointer to struct &file
410 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
411 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
412 */
413int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
414		      enum v4l2_buf_type type);
415
416/**
417 * v4l2_m2m_streamoff() - turn off streaming for a video queue
418 *
419 * @file: pointer to struct &file
420 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
421 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
422 */
423int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
424		       enum v4l2_buf_type type);
425
426/**
427 * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding
428 * session state when a start of streaming of a video queue is requested
429 *
430 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
431 * @q: queue
432 */
433void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
434					   struct vb2_queue *q);
435
436/**
437 * v4l2_m2m_update_stop_streaming_state() -  update the encoding/decoding
438 * session state when a stop of streaming of a video queue is requested
439 *
440 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
441 * @q: queue
442 */
443void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
444					  struct vb2_queue *q);
445
446/**
447 * v4l2_m2m_encoder_cmd() - execute an encoder command
448 *
449 * @file: pointer to struct &file
450 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
451 * @ec: pointer to the encoder command
452 */
453int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
454			 struct v4l2_encoder_cmd *ec);
455
456/**
457 * v4l2_m2m_decoder_cmd() - execute a decoder command
458 *
459 * @file: pointer to struct &file
460 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
461 * @dc: pointer to the decoder command
462 */
463int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
464			 struct v4l2_decoder_cmd *dc);
465
466/**
467 * v4l2_m2m_poll() - poll replacement, for destination buffers only
468 *
469 * @file: pointer to struct &file
470 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
471 * @wait: pointer to struct &poll_table_struct
472 *
473 * Call from the driver's poll() function. Will poll both queues. If a buffer
474 * is available to dequeue (with dqbuf) from the source queue, this will
475 * indicate that a non-blocking write can be performed, while read will be
476 * returned in case of the destination queue.
477 */
478__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
479			   struct poll_table_struct *wait);
480
481/**
482 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
483 *
484 * @file: pointer to struct &file
485 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
486 * @vma: pointer to struct &vm_area_struct
487 *
488 * Call from driver's mmap() function. Will handle mmap() for both queues
489 * seamlessly for the video buffer, which will receive normal per-queue offsets
490 * and proper vb2 queue pointers. The differentiation is made outside
491 * vb2 by adding a predefined offset to buffers from one of the queues
492 * and subtracting it before passing it back to vb2. Only drivers (and
493 * thus applications) receive modified offsets.
494 */
495int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
496		  struct vm_area_struct *vma);
497
498#ifndef CONFIG_MMU
499unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
500					 unsigned long len, unsigned long pgoff,
501					 unsigned long flags);
502#endif
503/**
504 * v4l2_m2m_init() - initialize per-driver m2m data
505 *
506 * @m2m_ops: pointer to struct v4l2_m2m_ops
507 *
508 * Usually called from driver's ``probe()`` function.
509 *
510 * Return: returns an opaque pointer to the internal data to handle M2M context
511 */
512struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
513
514#if defined(CONFIG_MEDIA_CONTROLLER)
515void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
516int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
517			struct video_device *vdev, int function);
518#else
519static inline void
520v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
521{
522}
523
524static inline int
525v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
526		struct video_device *vdev, int function)
527{
528	return 0;
529}
530#endif
531
532/**
533 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
534 *
535 * @m2m_dev: opaque pointer to the internal data to handle M2M context
536 *
537 * Usually called from driver's ``remove()`` function.
538 */
539void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
540
541/**
542 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
543 *
544 * @m2m_dev: opaque pointer to the internal data to handle M2M context
545 * @drv_priv: driver's instance private data
546 * @queue_init: a callback for queue type-specific initialization function
547 *	to be used for initializing vb2_queues
548 *
549 * Usually called from driver's ``open()`` function.
550 */
551struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
552		void *drv_priv,
553		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
554
555static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
556					     bool buffered)
557{
558	m2m_ctx->out_q_ctx.buffered = buffered;
559}
560
561static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
562					     bool buffered)
563{
564	m2m_ctx->cap_q_ctx.buffered = buffered;
565}
566
567/**
568 * v4l2_m2m_ctx_release() - release m2m context
569 *
570 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
571 *
572 * Usually called from driver's release() function.
573 */
574void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
575
576/**
577 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
578 *
579 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
580 * @vbuf: pointer to struct &vb2_v4l2_buffer
581 *
582 * Call from vb2_queue_ops->ops->buf_queue, vb2_queue_ops callback.
583 */
584void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
585			struct vb2_v4l2_buffer *vbuf);
586
587/**
588 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
589 * use
590 *
591 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
592 */
593static inline
594unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
595{
596	return m2m_ctx->out_q_ctx.num_rdy;
 
 
 
 
 
 
 
597}
598
599/**
600 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
601 * ready for use
602 *
603 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
604 */
605static inline
606unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
607{
608	return m2m_ctx->cap_q_ctx.num_rdy;
 
 
 
 
 
 
 
609}
610
611/**
612 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
613 *
614 * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
615 */
616struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
617
618/**
619 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
620 * buffers
621 *
622 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
623 */
624static inline struct vb2_v4l2_buffer *
625v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
626{
627	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
628}
629
630/**
631 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
632 * ready buffers
633 *
634 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
635 */
636static inline struct vb2_v4l2_buffer *
637v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
638{
639	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
640}
641

/**
 * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_last_src_buf() - return last source buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
}
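
/*
 * Illustrative sketch: while draining, a driver can check whether the
 * source buffer being processed is the last one queued and, if so, tag
 * the resulting capture buffer ("ctx", "src" and "dst" are hypothetical
 * driver variables):
 *
 *	if (v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx) == src)
 *		dst->flags |= V4L2_BUF_FLAG_LAST;
 */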

/**
 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_src_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)
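
/*
 * Illustrative sketch: walking the ready destination buffers; hold
 * rdy_spinlock while iterating, and use the _safe variant when entries
 * may be removed inside the loop:
 *
 *	struct v4l2_m2m_buffer *b;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
 *	v4l2_m2m_for_each_dst_buf(m2m_ctx, b)
 *		pr_debug("ready buffer %u\n", b->vb.vb2_buf.index);
 *	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
 */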

/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->cap_q_ctx.q;
}
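
/*
 * Illustrative sketch: the queue accessors are useful outside of vb2
 * callbacks, e.g. to check that both sides are streaming before doing
 * queue-dependent work:
 *
 *	if (vb2_is_streaming(v4l2_m2m_get_src_vq(m2m_ctx)) &&
 *	    vb2_is_streaming(v4l2_m2m_get_dst_vq(m2m_ctx))) {
 *		... both queues are live ...
 *	}
 */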

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}
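
/*
 * Illustrative sketch: a typical completion (e.g. interrupt) handler
 * removes the processed buffers, marks them done and finishes the job.
 * v4l2_m2m_buf_done() and v4l2_m2m_job_finish() are declared earlier in
 * this header; "ctx" and "dev" are hypothetical driver variables.
 *
 *	struct vb2_v4l2_buffer *src, *dst;
 *
 *	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 */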

/**
 * v4l2_m2m_buf_remove_by_buf() - take off an exact buffer from the list of
 * ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @vbuf: the buffer to be removed
 */
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_src_buf_remove_by_buf() - take off an exact source buffer from the
 * list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
}

/**
 * v4l2_m2m_dst_buf_remove_by_buf() - take off an exact destination buffer from
 * the list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
}

/**
 * v4l2_m2m_buf_remove_by_idx() - take off the buffer with the given index from
 * the list of ready buffers and return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @idx: index of the buffer to be removed
 */
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);

static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
}

static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}
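
/*
 * Illustrative sketch: a stop_streaming callback commonly flushes all
 * still-queued buffers back to userspace in the error state ("ctx" is a
 * hypothetical driver context, "q" the vb2_queue being stopped):
 *
 *	struct vb2_v4l2_buffer *vbuf;
 *
 *	if (V4L2_TYPE_IS_OUTPUT(q->type))
 *		while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
 *			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
 *	else
 *		while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
 *			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
 */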

/**
 * v4l2_m2m_buf_copy_metadata() - copy buffer metadata from
 * the output buffer to the capture buffer
 *
 * @out_vb: the output buffer that is the source of the metadata.
 * @cap_vb: the capture buffer that will receive the metadata.
 * @copy_frame_flags: copy the KEY/B/PFRAME flags as well.
 *
 * This helper function copies the timestamp, timecode (if the TIMECODE
 * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME
 * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb.
 *
 * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME
 * flags are not copied. This is typically needed for encoders that
 * set these flags explicitly.
 */
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags);
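
/*
 * Illustrative sketch: stateful codec drivers usually propagate the
 * metadata when completing a capture buffer ("src" and "dst" are
 * hypothetical driver variables):
 *
 *	v4l2_m2m_buf_copy_metadata(src, dst, true);
 *	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 */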

/* v4l2 request helper */

void v4l2_m2m_request_queue(struct media_request *req);

/* v4l2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
				struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
				struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
			       struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
				enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
				enum v4l2_buf_type type);
int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh,
			       struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh,
			       struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
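
/*
 * Illustrative sketch: drivers that store the m2m context in a struct
 * v4l2_fh (as these helpers expect via file->private_data) can plug the
 * helpers directly into their ops tables. "my_open", "my_release" and the
 * remaining format handlers are hypothetical driver code.
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *		... format ioctls remain driver-specific ...
 *	};
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */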

#endif /* _MEDIA_V4L2_MEM2MEM_H */