v6.8
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 *	uvc_video.c  --  USB Video Class Gadget driver
  4 *
  5 *	Copyright (C) 2009-2010
  6 *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  7 */
  8
  9#include <linux/kernel.h>
 10#include <linux/device.h>
 11#include <linux/errno.h>
 12#include <linux/usb/ch9.h>
 13#include <linux/usb/gadget.h>
 14#include <linux/usb/video.h>
 15#include <asm/unaligned.h>
 16
 17#include <media/v4l2-dev.h>
 18
 19#include "uvc.h"
 20#include "uvc_queue.h"
 21#include "uvc_video.h"
 22
 23/* --------------------------------------------------------------------------
 24 * Video codecs
 25 */
 26
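/*
 * Note on the header format produced below: data[0] holds the header length
 * and data[1] the bmHeaderInfo flags (EOH is always set, FID carries the
 * frame ID that the encoders toggle at each end of frame, EOF marks the last
 * chunk of a frame), optionally followed by a 4-byte PTS and a 6-byte SCR
 * (32-bit STC plus a 16-bit SOF counter). Both timestamps are expressed in
 * units of the 48 MHz dwClockFrequency.
 */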
 27static int
 28uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
 29		u8 *data, int len)
 30{
 31	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
 32	struct usb_composite_dev *cdev = uvc->func.config->cdev;
 33	struct timespec64 ts = ns_to_timespec64(buf->buf.vb2_buf.timestamp);
 34	int pos = 2;
 35
 36	data[1] = UVC_STREAM_EOH | video->fid;
 37
 38	if (video->queue.buf_used == 0 && ts.tv_sec) {
 39		/* dwClockFrequency is 48 MHz */
 40		u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
 41
 42		data[1] |= UVC_STREAM_PTS;
 43		put_unaligned_le32(pts, &data[pos]);
 44		pos += 4;
 45	}
 46
 47	if (cdev->gadget->ops->get_frame) {
 48		u32 sof, stc;
 49
 50		sof = usb_gadget_frame_number(cdev->gadget);
 51		ktime_get_ts64(&ts);
 52		stc = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
 53
 54		data[1] |= UVC_STREAM_SCR;
 55		put_unaligned_le32(stc, &data[pos]);
 56		put_unaligned_le16(sof, &data[pos+4]);
 57		pos += 6;
 58	}
 59
 60	data[0] = pos;
 61
 62	if (buf->bytesused - video->queue.buf_used <= len - pos)
 63		data[1] |= UVC_STREAM_EOF;
 64
 65	return pos;
 66}
 67
 68static int
 69uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
 70		u8 *data, int len)
 71{
 72	struct uvc_video_queue *queue = &video->queue;
 73	unsigned int nbytes;
 74	void *mem;
 75
 76	/* Copy video data to the USB buffer. */
 77	mem = buf->mem + queue->buf_used;
 78	nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);
 79
 80	memcpy(data, mem, nbytes);
 81	queue->buf_used += nbytes;
 82
 83	return nbytes;
 84}
 85
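/*
 * Note: bulk encoding below splits each video buffer into payloads of at
 * most max_payload_size bytes. A header is emitted only at the start of each
 * payload, and req->zero is set when the payload reaches max_payload_size so
 * the controller terminates it with a short (zero-length) packet if needed.
 */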
 86static void
 87uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
 88		struct uvc_buffer *buf)
 89{
 90	void *mem = req->buf;
 91	struct uvc_request *ureq = req->context;
 92	int len = video->req_size;
 93	int ret;
 94
 95	/* Add a header at the beginning of the payload. */
 96	if (video->payload_size == 0) {
 97		ret = uvc_video_encode_header(video, buf, mem, len);
 98		video->payload_size += ret;
 99		mem += ret;
100		len -= ret;
101	}
102
103	/* Process video data. */
104	len = min((int)(video->max_payload_size - video->payload_size), len);
105	ret = uvc_video_encode_data(video, buf, mem, len);
106
107	video->payload_size += ret;
108	len -= ret;
109
110	req->length = video->req_size - len;
111	req->zero = video->payload_size == video->max_payload_size;
112
113	if (buf->bytesused == video->queue.buf_used) {
114		video->queue.buf_used = 0;
115		buf->state = UVC_BUF_STATE_DONE;
116		list_del(&buf->queue);
117		video->fid ^= UVC_STREAM_FID;
118		ureq->last_buf = buf;
119
120		video->payload_size = 0;
121	}
122
123	if (video->payload_size == video->max_payload_size ||
124	    video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE ||
125	    buf->bytesused == video->queue.buf_used)
126		video->payload_size = 0;
127}
128
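/*
 * Note: this is the scatter-gather variant of the isochronous encoder. The
 * payload header is placed in its own sg entry (ureq->header) and the
 * remaining entries point directly at the pages of the video buffer, so the
 * image data is not copied into a bounce buffer for each request.
 */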
129static void
130uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
131		struct uvc_buffer *buf)
132{
133	unsigned int pending = buf->bytesused - video->queue.buf_used;
134	struct uvc_request *ureq = req->context;
135	struct scatterlist *sg, *iter;
136	unsigned int len = video->req_size;
137	unsigned int sg_left, part = 0;
138	unsigned int i;
139	int header_len;
140
141	sg = ureq->sgt.sgl;
142	sg_init_table(sg, ureq->sgt.nents);
143
144	/* Init the header. */
145	header_len = uvc_video_encode_header(video, buf, ureq->header,
146				      video->req_size);
147	sg_set_buf(sg, ureq->header, header_len);
148	len -= header_len;
149
150	if (pending <= len)
151		len = pending;
152
153	req->length = (len == pending) ?
154		len + header_len : video->req_size;
155
156	/* Init the pending sgs with payload */
157	sg = sg_next(sg);
158
159	for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
160		if (!len || !buf->sg || !buf->sg->length)
161			break;
162
163		sg_left = buf->sg->length - buf->offset;
164		part = min_t(unsigned int, len, sg_left);
165
166		sg_set_page(iter, sg_page(buf->sg), part, buf->offset);
167
168		if (part == sg_left) {
169			buf->offset = 0;
170			buf->sg = sg_next(buf->sg);
171		} else {
172			buf->offset += part;
173		}
174		len -= part;
175	}
176
177	/* Assign the header and video data to the request via the sg list. */
178	req->buf = NULL;
179	req->sg	= ureq->sgt.sgl;
180	req->num_sgs = i + 1;
181
182	req->length -= len;
183	video->queue.buf_used += req->length - header_len;
184
185	if (buf->bytesused == video->queue.buf_used || !buf->sg ||
186			video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
187		video->queue.buf_used = 0;
188		buf->state = UVC_BUF_STATE_DONE;
189		buf->offset = 0;
190		list_del(&buf->queue);
191		video->fid ^= UVC_STREAM_FID;
192		ureq->last_buf = buf;
193	}
194}
195
196static void
197uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
198		struct uvc_buffer *buf)
199{
200	void *mem = req->buf;
201	struct uvc_request *ureq = req->context;
202	int len = video->req_size;
203	int ret;
204
205	/* Add the header. */
206	ret = uvc_video_encode_header(video, buf, mem, len);
207	mem += ret;
208	len -= ret;
209
210	/* Process video data. */
211	ret = uvc_video_encode_data(video, buf, mem, len);
212	len -= ret;
213
214	req->length = video->req_size - len;
215
216	if (buf->bytesused == video->queue.buf_used ||
217			video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
218		video->queue.buf_used = 0;
219		buf->state = UVC_BUF_STATE_DONE;
220		list_del(&buf->queue);
221		video->fid ^= UVC_STREAM_FID;
222		ureq->last_buf = buf;
223	}
224}
225
226/* --------------------------------------------------------------------------
227 * Request handling
228 */
229
230/*
231 * Callers must take care to hold req_lock when this function may be called
232 * from multiple threads, for example while frames are streaming to the host.
233 */
234static void
235uvc_video_free_request(struct uvc_request *ureq, struct usb_ep *ep)
236{
237	sg_free_table(&ureq->sgt);
238	if (ureq->req && ep) {
239		usb_ep_free_request(ep, ureq->req);
240		ureq->req = NULL;
241	}
242
243	kfree(ureq->req_buffer);
244	ureq->req_buffer = NULL;
245
246	if (!list_empty(&ureq->list))
247		list_del_init(&ureq->list);
248
249	kfree(ureq);
250}
251
252static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
253{
254	int ret;
255
256	ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
257	if (ret < 0) {
258		uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
259			 ret);
260
261		/* If the endpoint is disabled the descriptor may be NULL. */
262		if (video->ep->desc) {
263			/* Isochronous endpoints can't be halted. */
264			if (usb_endpoint_xfer_bulk(video->ep->desc))
265				usb_ep_set_halt(video->ep);
266		}
267	}
268
269	return ret;
270}
271
272/* This function must be called with video->req_lock held. */
273static int uvcg_video_usb_req_queue(struct uvc_video *video,
274	struct usb_request *req, bool queue_to_ep)
275{
276	bool is_bulk = video->max_payload_size;
277	struct list_head *list = NULL;
278
279	if (!video->is_enabled)
280		return -ENODEV;
281
282	if (queue_to_ep) {
283		struct uvc_request *ureq = req->context;
284		/*
285		 * With USB3 handling more requests at a higher speed, we can't
286		 * afford to generate an interrupt for every request. Decide to
287		 * interrupt:
288		 *
289		 * - When no more requests are available in the free queue, as
290		 *   this may be our last chance to refill the endpoint's
291		 *   request queue.
292		 *
293		 * - When this request is the last request for the video
294		 *   buffer, as we want to start sending the next video buffer
295		 *   ASAP in case it doesn't get started already in the next
296		 *   iteration of this loop.
297		 *
298		 * - Four times over the length of the requests queue (as
299		 *   indicated by video->uvc_num_requests), as a trade-off
300		 *   between latency and interrupt load.
301		 */
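		/*
		 * For example, with 64 allocated requests this requests an
		 * interrupt on every 16th request (DIV_ROUND_UP(64, 4) == 16),
		 * i.e. roughly four times per pass through the request queue.
		 */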
302		if (list_empty(&video->req_free) || ureq->last_buf ||
303			!(video->req_int_count %
304			DIV_ROUND_UP(video->uvc_num_requests, 4))) {
305			video->req_int_count = 0;
306			req->no_interrupt = 0;
307		} else {
308			req->no_interrupt = 1;
309		}
310		video->req_int_count++;
311		return uvcg_video_ep_queue(video, req);
312	}
313	/*
314	 * If we're not queueing to the ep: for isoc, the request goes on the
315	 * req_ready list; for bulk, it goes back on req_free.
316	 */
317	list = is_bulk ? &video->req_free : &video->req_ready;
318	list_add_tail(&req->list, list);
319	return 0;
320}
321
322/*
323 * Must only be called from uvcg_video_enable - after that, requests are only
324 * queued to the endpoint from the uvc_video_complete() handler.
325 * This function is needed in order to kick-start the flow of requests from
326 * the gadget driver to the USB controller.
327 */
328static void uvc_video_ep_queue_initial_requests(struct uvc_video *video)
329{
330	struct usb_request *req = NULL;
331	unsigned long flags = 0;
332	unsigned int count = 0;
333	int ret = 0;
334
335	/*
336	 * We only queue half of the free list since we still want to have
337	 * some free usb_requests in the free list for the video_pump async_wq
338	 * thread to encode uvc buffers into. Otherwise we could get into a
339	 * situation where the free list does not have any usb requests to
340	 * encode into - we would always end up queueing 0 length requests to the
341	 * endpoint.
342	 */
343	unsigned int half_list_size = video->uvc_num_requests / 2;
344
345	spin_lock_irqsave(&video->req_lock, flags);
346	/*
347	 * Take these requests off the free list and queue them all to the
348	 * endpoint. Since we queue 0 length requests with the req_lock held,
349	 * there isn't any data race involved here with the complete handler.
350	 */
351	while (count < half_list_size) {
352		req = list_first_entry(&video->req_free, struct usb_request,
353					list);
354		list_del(&req->list);
355		req->length = 0;
356		ret = uvcg_video_ep_queue(video, req);
357		if (ret < 0) {
358			uvcg_queue_cancel(&video->queue, 0);
359			break;
360		}
361		count++;
362	}
363	spin_unlock_irqrestore(&video->req_lock, flags);
364}
365
366static void
367uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
368{
369	struct uvc_request *ureq = req->context;
370	struct uvc_video *video = ureq->video;
371	struct uvc_video_queue *queue = &video->queue;
372	struct uvc_buffer *last_buf;
373	unsigned long flags;
374	bool is_bulk = video->max_payload_size;
375	int ret = 0;
376
377	spin_lock_irqsave(&video->req_lock, flags);
378	if (!video->is_enabled) {
379		/*
380		 * When is_enabled is false, uvcg_video_disable() ensures
381		 * that in-flight uvc_buffers are returned, so we can
382		 * safely call free_request without worrying about
383		 * last_buf.
384		 */
385		uvc_video_free_request(ureq, ep);
386		spin_unlock_irqrestore(&video->req_lock, flags);
387		return;
388	}
389
390	last_buf = ureq->last_buf;
391	ureq->last_buf = NULL;
392	spin_unlock_irqrestore(&video->req_lock, flags);
393
394	switch (req->status) {
395	case 0:
396		break;
397
398	case -EXDEV:
399		uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n");
400		queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
401		break;
402
403	case -ESHUTDOWN:	/* disconnect from host. */
404		uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
405		uvcg_queue_cancel(queue, 1);
406		break;
407
408	default:
409		uvcg_warn(&video->uvc->func,
410			  "VS request completed with status %d.\n",
411			  req->status);
412		uvcg_queue_cancel(queue, 0);
413	}
414
415	if (last_buf) {
416		spin_lock_irqsave(&queue->irqlock, flags);
417		uvcg_complete_buffer(queue, last_buf);
418		spin_unlock_irqrestore(&queue->irqlock, flags);
419	}
420
421	spin_lock_irqsave(&video->req_lock, flags);
422	/*
423	 * The video stream might have been disabled while we were
424	 * processing the current usb_request, so make sure we're
425	 * still streaming before queueing the usb_request back
426	 * to req_free.
427	 */
428	if (video->is_enabled) {
429		/*
430		 * Here we check whether any request is available in the ready
431		 * list. If it is, queue it to the ep and add the current
432		 * usb_request to the req_free list - for video_pump to fill in.
433		 * Otherwise, just use the current usb_request to queue a 0
434		 * length request to the ep. Since we always add to the req_free
435		 * list if we dequeue from the ready list, there will never
436		 * be a situation where the req_free list is completely out of
437		 * requests and cannot recover.
438		 */
439		struct usb_request *to_queue = req;
440
441		to_queue->length = 0;
442		if (!list_empty(&video->req_ready)) {
443			to_queue = list_first_entry(&video->req_ready,
444				struct usb_request, list);
445			list_del(&to_queue->list);
446			list_add_tail(&req->list, &video->req_free);
447			/*
448			 * Queue work to the wq as well, since it is possible that a
449			 * buffer may not have been completely encoded with the set of
450			 * in-flight usb requests for which the complete callbacks are
451			 * firing.
452			 * In that case, if we do not queue work to the worker thread,
453			 * the buffer will never be marked as complete - and therefore
454			 * not be returned to userspace. As a result, the
455			 * dequeue -> queue -> dequeue flow of uvc buffers will not
456			 * happen.
457			 */
458			queue_work(video->async_wq, &video->pump);
459		}
460		/*
461		 * Queue to the endpoint. The actual queueing to ep will
462		 * only happen on one thread - the async_wq for bulk endpoints
463		 * and this thread for isoc endpoints.
464		 */
465		ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
466		if (ret < 0) {
467			/*
468			 * Endpoint error, but the stream is still enabled.
469			 * Put request back in req_free for it to be cleaned
470			 * up later.
471			 */
472			list_add_tail(&to_queue->list, &video->req_free);
473		}
474	} else {
475		uvc_video_free_request(ureq, ep);
476		ret = 0;
477	}
478	spin_unlock_irqrestore(&video->req_lock, flags);
479	if (ret < 0)
480		uvcg_queue_cancel(queue, 0);
481}
482
483static int
484uvc_video_free_requests(struct uvc_video *video)
485{
486	struct uvc_request *ureq, *temp;
487
488	list_for_each_entry_safe(ureq, temp, &video->ureqs, list)
489		uvc_video_free_request(ureq, video->ep);
490
491	INIT_LIST_HEAD(&video->ureqs);
492	INIT_LIST_HEAD(&video->req_free);
493	INIT_LIST_HEAD(&video->req_ready);
494	video->req_size = 0;
495	return 0;
496}
497
498static int
499uvc_video_alloc_requests(struct uvc_video *video)
500{
501	struct uvc_request *ureq;
502	unsigned int req_size;
503	unsigned int i;
504	int ret = -ENOMEM;
505
506	BUG_ON(video->req_size);
507
508	req_size = video->ep->maxpacket
509		 * max_t(unsigned int, video->ep->maxburst, 1)
510		 * (video->ep->mult);
511
512	for (i = 0; i < video->uvc_num_requests; i++) {
513		ureq = kzalloc(sizeof(struct uvc_request), GFP_KERNEL);
514		if (ureq == NULL)
515			goto error;
516
517		INIT_LIST_HEAD(&ureq->list);
518
519		list_add_tail(&ureq->list, &video->ureqs);
520
521		ureq->req_buffer = kmalloc(req_size, GFP_KERNEL);
522		if (ureq->req_buffer == NULL)
523			goto error;
524
525		ureq->req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
526		if (ureq->req == NULL)
527			goto error;
528
529		ureq->req->buf = ureq->req_buffer;
530		ureq->req->length = 0;
531		ureq->req->complete = uvc_video_complete;
532		ureq->req->context = ureq;
533		ureq->video = video;
534		ureq->last_buf = NULL;
535
536		list_add_tail(&ureq->req->list, &video->req_free);
537		/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
538		sg_alloc_table(&ureq->sgt,
539			       DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN,
540					    PAGE_SIZE) + 2, GFP_KERNEL);
541	}
542
543	video->req_size = req_size;
544
545	return 0;
546
547error:
548	uvc_video_free_requests(video);
549	return ret;
550}
551
552/* --------------------------------------------------------------------------
553 * Video streaming
554 */
555
556/*
557 * uvcg_video_pump - Pump video data into the USB requests
558 *
559 * This function fills the available USB requests (listed in req_free) with
560 * video data from the queued buffers.
561 */
562static void uvcg_video_pump(struct work_struct *work)
563{
564	struct uvc_video *video = container_of(work, struct uvc_video, pump);
565	struct uvc_video_queue *queue = &video->queue;
566	/* video->max_payload_size is only set when using bulk transfer */
567	bool is_bulk = video->max_payload_size;
568	struct usb_request *req = NULL;
569	struct uvc_buffer *buf;
570	unsigned long flags;
571	int ret = 0;
572
573	while (true) {
574		if (!video->ep->enabled)
575			return;
576
577		/*
578		 * Check is_enabled and retrieve the first available USB
579		 * request, protected by the request lock.
580		 */
581		spin_lock_irqsave(&video->req_lock, flags);
582		if (!video->is_enabled || list_empty(&video->req_free)) {
583			spin_unlock_irqrestore(&video->req_lock, flags);
584			return;
585		}
586		req = list_first_entry(&video->req_free, struct usb_request,
587					list);
588		list_del(&req->list);
589		spin_unlock_irqrestore(&video->req_lock, flags);
590
591		/*
592		 * Retrieve the first available video buffer and fill the
593		 * request, protected by the video queue irqlock.
594		 */
595		spin_lock_irqsave(&queue->irqlock, flags);
596		buf = uvcg_queue_head(queue);
597
598		if (buf != NULL) {
599			video->encode(req, video, buf);
600		} else {
601			/*
602			 * Either the queue has been disconnected or no video buffer is
603			 * available for bulk transfer. Either way, stop processing
604			 * further.
605			 */
606			spin_unlock_irqrestore(&queue->irqlock, flags);
607			break;
608		}
609
610		spin_unlock_irqrestore(&queue->irqlock, flags);
611
612		spin_lock_irqsave(&video->req_lock, flags);
613		/* For bulk endpoints we queue from the worker thread,
614		 * since we would rather not wait for requests to become
615		 * ready in the uvcg_video_complete() handler.
616		 * For isoc endpoints we add the request to the ready list
617		 * and only queue it to the endpoint from the complete handler.
618		 */
619		ret = uvcg_video_usb_req_queue(video, req, is_bulk);
620		spin_unlock_irqrestore(&video->req_lock, flags);
621
622		if (ret < 0) {
623			uvcg_queue_cancel(queue, 0);
624			break;
625		}
626
627		/* The request is owned by the endpoint / ready list. */
628		req = NULL;
629	}
630
631	if (!req)
632		return;
633
634	spin_lock_irqsave(&video->req_lock, flags);
635	if (video->is_enabled)
636		list_add_tail(&req->list, &video->req_free);
637	else
638		uvc_video_free_request(req->context, video->ep);
639	spin_unlock_irqrestore(&video->req_lock, flags);
640}
641
642/*
643 * Disable the video stream.
644 */
645int
646uvcg_video_disable(struct uvc_video *video)
647{
648	unsigned long flags;
649	struct list_head inflight_bufs;
650	struct usb_request *req, *temp;
651	struct uvc_buffer *buf, *btemp;
652	struct uvc_request *ureq, *utemp;
653
654	if (video->ep == NULL) {
655		uvcg_info(&video->uvc->func,
656			  "Video disable failed, device is uninitialized.\n");
657		return -ENODEV;
658	}
659
660	INIT_LIST_HEAD(&inflight_bufs);
661	spin_lock_irqsave(&video->req_lock, flags);
662	video->is_enabled = false;
663
664	/*
665	 * Remove any in-flight buffers from the uvc_requests
666	 * because we want to return them before cancelling the
667	 * queue. This ensures that we aren't stuck waiting for
668	 * all complete callbacks to come through before disabling
669	 * the vb2 queue.
670	 */
671	list_for_each_entry(ureq, &video->ureqs, list) {
672		if (ureq->last_buf) {
673			list_add_tail(&ureq->last_buf->queue, &inflight_bufs);
674			ureq->last_buf = NULL;
675		}
676	}
677	spin_unlock_irqrestore(&video->req_lock, flags);
678
679	cancel_work_sync(&video->pump);
680	uvcg_queue_cancel(&video->queue, 0);
681
682	spin_lock_irqsave(&video->req_lock, flags);
683	/*
684	 * Remove all uvc_requests from ureqs with list_del_init.
685	 * This lets uvc_video_free_request correctly identify
686	 * if the uvc_request is attached to a list or not when freeing
687	 * memory.
688	 */
689	list_for_each_entry_safe(ureq, utemp, &video->ureqs, list)
690		list_del_init(&ureq->list);
691
692	list_for_each_entry_safe(req, temp, &video->req_free, list) {
693		list_del(&req->list);
694		uvc_video_free_request(req->context, video->ep);
695	}
696
697	list_for_each_entry_safe(req, temp, &video->req_ready, list) {
698		list_del(&req->list);
699		uvc_video_free_request(req->context, video->ep);
700	}
701
702	INIT_LIST_HEAD(&video->ureqs);
703	INIT_LIST_HEAD(&video->req_free);
704	INIT_LIST_HEAD(&video->req_ready);
705	video->req_size = 0;
706	spin_unlock_irqrestore(&video->req_lock, flags);
707
708	/*
709	 * Return all the video buffers before disabling the queue.
710	 */
711	spin_lock_irqsave(&video->queue.irqlock, flags);
712	list_for_each_entry_safe(buf, btemp, &inflight_bufs, queue) {
713		list_del(&buf->queue);
714		uvcg_complete_buffer(&video->queue, buf);
715	}
716	spin_unlock_irqrestore(&video->queue.irqlock, flags);
717
718	uvcg_queue_enable(&video->queue, 0);
719	return 0;
720}
721
722/*
723 * Enable the video stream.
724 */
725int uvcg_video_enable(struct uvc_video *video)
726{
727	int ret;
728
729	if (video->ep == NULL) {
730		uvcg_info(&video->uvc->func,
731			  "Video enable failed, device is uninitialized.\n");
732		return -ENODEV;
733	}
734
735	/*
736	 * Safe to access request related fields without req_lock because
737	 * this is the only thread currently active, and no other
738	 * request handling thread will become active until this function
739	 * returns.
740	 */
741	video->is_enabled = true;
742
743	if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
744		return ret;
745
746	if ((ret = uvc_video_alloc_requests(video)) < 0)
747		return ret;
748
749	if (video->max_payload_size) {
750		video->encode = uvc_video_encode_bulk;
751		video->payload_size = 0;
752	} else
753		video->encode = video->queue.use_sg ?
754			uvc_video_encode_isoc_sg : uvc_video_encode_isoc;
755
756	video->req_int_count = 0;
757
758	uvc_video_ep_queue_initial_requests(video);
759
760	return ret;
761}
762
763/*
764 * Initialize the UVC video stream.
765 */
766int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
767{
768	video->is_enabled = false;
769	INIT_LIST_HEAD(&video->ureqs);
770	INIT_LIST_HEAD(&video->req_free);
771	INIT_LIST_HEAD(&video->req_ready);
772	spin_lock_init(&video->req_lock);
773	INIT_WORK(&video->pump, uvcg_video_pump);
774
775	/* Allocate a work queue for the asynchronous video pump handler. */
776	video->async_wq = alloc_workqueue("uvcgadget", WQ_UNBOUND | WQ_HIGHPRI, 0);
777	if (!video->async_wq)
778		return -EINVAL;
779
780	video->uvc = uvc;
781	video->fcc = V4L2_PIX_FMT_YUYV;
782	video->bpp = 16;
783	video->width = 320;
784	video->height = 240;
785	video->imagesize = 320 * 240 * 2;
786
787	/* Initialize the video buffers queue. */
788	uvcg_queue_init(&video->queue, uvc->v4l2_dev.dev->parent,
789			V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
790	return 0;
791}
v5.9
  1// SPDX-License-Identifier: GPL-2.0+
  2/*
  3 *	uvc_video.c  --  USB Video Class Gadget driver
  4 *
  5 *	Copyright (C) 2009-2010
  6 *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  7 */
  8
  9#include <linux/kernel.h>
 10#include <linux/device.h>
 11#include <linux/errno.h>
 12#include <linux/usb/ch9.h>
 13#include <linux/usb/gadget.h>
 14#include <linux/usb/video.h>
 15
 16#include <media/v4l2-dev.h>
 17
 18#include "uvc.h"
 19#include "uvc_queue.h"
 20#include "uvc_video.h"
 21
 22/* --------------------------------------------------------------------------
 23 * Video codecs
 24 */
 25
 26static int
 27uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
 28		u8 *data, int len)
 29{
 30	data[0] = 2;
 31	data[1] = UVC_STREAM_EOH | video->fid;
 32
 33	if (buf->bytesused - video->queue.buf_used <= len - 2)
 34		data[1] |= UVC_STREAM_EOF;
 35
 36	return 2;
 37}
 38
 39static int
 40uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
 41		u8 *data, int len)
 42{
 43	struct uvc_video_queue *queue = &video->queue;
 44	unsigned int nbytes;
 45	void *mem;
 46
 47	/* Copy video data to the USB buffer. */
 48	mem = buf->mem + queue->buf_used;
 49	nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);
 50
 51	memcpy(data, mem, nbytes);
 52	queue->buf_used += nbytes;
 53
 54	return nbytes;
 55}
 56
 57static void
 58uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
 59		struct uvc_buffer *buf)
 60{
 61	void *mem = req->buf;
 62	int len = video->req_size;
 63	int ret;
 64
 65	/* Add a header at the beginning of the payload. */
 66	if (video->payload_size == 0) {
 67		ret = uvc_video_encode_header(video, buf, mem, len);
 68		video->payload_size += ret;
 69		mem += ret;
 70		len -= ret;
 71	}
 72
 73	/* Process video data. */
 74	len = min((int)(video->max_payload_size - video->payload_size), len);
 75	ret = uvc_video_encode_data(video, buf, mem, len);
 76
 77	video->payload_size += ret;
 78	len -= ret;
 79
 80	req->length = video->req_size - len;
 81	req->zero = video->payload_size == video->max_payload_size;
 82
 83	if (buf->bytesused == video->queue.buf_used) {
 84		video->queue.buf_used = 0;
 85		buf->state = UVC_BUF_STATE_DONE;
 86		uvcg_queue_next_buffer(&video->queue, buf);
 87		video->fid ^= UVC_STREAM_FID;
 88
 89		video->payload_size = 0;
 90	}
 91
 92	if (video->payload_size == video->max_payload_size ||
 93	    buf->bytesused == video->queue.buf_used)
 94		video->payload_size = 0;
 95}
 96
 97static void
 98uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
 99		struct uvc_buffer *buf)
100{
101	void *mem = req->buf;
102	int len = video->req_size;
103	int ret;
104
105	/* Add the header. */
106	ret = uvc_video_encode_header(video, buf, mem, len);
107	mem += ret;
108	len -= ret;
109
110	/* Process video data. */
111	ret = uvc_video_encode_data(video, buf, mem, len);
112	len -= ret;
113
114	req->length = video->req_size - len;
115
116	if (buf->bytesused == video->queue.buf_used) {
117		video->queue.buf_used = 0;
118		buf->state = UVC_BUF_STATE_DONE;
119		uvcg_queue_next_buffer(&video->queue, buf);
120		video->fid ^= UVC_STREAM_FID;
121	}
122}
123
124/* --------------------------------------------------------------------------
125 * Request handling
126 */
127
128static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
129{
130	int ret;
131
132	ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
133	if (ret < 0) {
134		uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
135			 ret);
136
137		/* Isochronous endpoints can't be halted. */
138		if (usb_endpoint_xfer_bulk(video->ep->desc))
139			usb_ep_set_halt(video->ep);
140	}
141
142	return ret;
143}
144
145static void
146uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
147{
148	struct uvc_video *video = req->context;
149	struct uvc_video_queue *queue = &video->queue;
150	unsigned long flags;
151
152	switch (req->status) {
153	case 0:
154		break;
155
156	case -ESHUTDOWN:	/* disconnect from host. */
157		uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
158		uvcg_queue_cancel(queue, 1);
159		break;
160
161	default:
162		uvcg_info(&video->uvc->func,
163			  "VS request completed with status %d.\n",
164			  req->status);
165		uvcg_queue_cancel(queue, 0);
166	}
167
168	spin_lock_irqsave(&video->req_lock, flags);
169	list_add_tail(&req->list, &video->req_free);
170	spin_unlock_irqrestore(&video->req_lock, flags);
171
172	schedule_work(&video->pump);
173}
174
175static int
176uvc_video_free_requests(struct uvc_video *video)
177{
178	unsigned int i;
179
180	for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
181		if (video->req[i]) {
182			usb_ep_free_request(video->ep, video->req[i]);
183			video->req[i] = NULL;
184		}
185
186		if (video->req_buffer[i]) {
187			kfree(video->req_buffer[i]);
188			video->req_buffer[i] = NULL;
189		}
190	}
191
192	INIT_LIST_HEAD(&video->req_free);
193	video->req_size = 0;
194	return 0;
195}
196
197static int
198uvc_video_alloc_requests(struct uvc_video *video)
199{
200	unsigned int req_size;
201	unsigned int i;
202	int ret = -ENOMEM;
203
204	BUG_ON(video->req_size);
205
206	req_size = video->ep->maxpacket
207		 * max_t(unsigned int, video->ep->maxburst, 1)
208		 * (video->ep->mult);
209
210	for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
211		video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL);
212		if (video->req_buffer[i] == NULL)
213			goto error;
214
215		video->req[i] = usb_ep_alloc_request(video->ep, GFP_KERNEL);
216		if (video->req[i] == NULL)
217			goto error;
218
219		video->req[i]->buf = video->req_buffer[i];
220		video->req[i]->length = 0;
221		video->req[i]->complete = uvc_video_complete;
222		video->req[i]->context = video;
223
224		list_add_tail(&video->req[i]->list, &video->req_free);
225	}
226
227	video->req_size = req_size;
228
229	return 0;
230
231error:
232	uvc_video_free_requests(video);
233	return ret;
234}
235
236/* --------------------------------------------------------------------------
237 * Video streaming
238 */
239
240/*
241 * uvcg_video_pump - Pump video data into the USB requests
242 *
243 * This function fills the available USB requests (listed in req_free) with
244 * video data from the queued buffers.
245 */
246static void uvcg_video_pump(struct work_struct *work)
247{
248	struct uvc_video *video = container_of(work, struct uvc_video, pump);
249	struct uvc_video_queue *queue = &video->queue;
250	struct usb_request *req;
251	struct uvc_buffer *buf;
252	unsigned long flags;
253	int ret;
254
255	while (1) {
256		/* Retrieve the first available USB request, protected by the
257		 * request lock.
258		 */
259		spin_lock_irqsave(&video->req_lock, flags);
260		if (list_empty(&video->req_free)) {
261			spin_unlock_irqrestore(&video->req_lock, flags);
262			return;
263		}
264		req = list_first_entry(&video->req_free, struct usb_request,
265					list);
266		list_del(&req->list);
267		spin_unlock_irqrestore(&video->req_lock, flags);
268
269		/* Retrieve the first available video buffer and fill the
270		 * request, protected by the video queue irqlock.
271		 */
272		spin_lock_irqsave(&queue->irqlock, flags);
273		buf = uvcg_queue_head(queue);
274		if (buf == NULL) {
275			spin_unlock_irqrestore(&queue->irqlock, flags);
276			break;
277		}
278
279		video->encode(req, video, buf);
280
281		/* Queue the USB request */
282		ret = uvcg_video_ep_queue(video, req);
283		spin_unlock_irqrestore(&queue->irqlock, flags);
284
285		if (ret < 0) {
286			uvcg_queue_cancel(queue, 0);
287			break;
288		}
289	}
290
291	spin_lock_irqsave(&video->req_lock, flags);
292	list_add_tail(&req->list, &video->req_free);
293	spin_unlock_irqrestore(&video->req_lock, flags);
294	return;
295}
296
297/*
298 * Enable or disable the video stream.
299 */
300int uvcg_video_enable(struct uvc_video *video, int enable)
301{
302	unsigned int i;
303	int ret;
304
305	if (video->ep == NULL) {
306		uvcg_info(&video->uvc->func,
307			  "Video enable failed, device is uninitialized.\n");
308		return -ENODEV;
309	}
310
311	if (!enable) {
312		cancel_work_sync(&video->pump);
313		uvcg_queue_cancel(&video->queue, 0);
314
315		for (i = 0; i < UVC_NUM_REQUESTS; ++i)
316			if (video->req[i])
317				usb_ep_dequeue(video->ep, video->req[i]);
318
319		uvc_video_free_requests(video);
320		uvcg_queue_enable(&video->queue, 0);
321		return 0;
322	}
323
324	if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
325		return ret;
326
327	if ((ret = uvc_video_alloc_requests(video)) < 0)
328		return ret;
329
330	if (video->max_payload_size) {
331		video->encode = uvc_video_encode_bulk;
332		video->payload_size = 0;
333	} else
334		video->encode = uvc_video_encode_isoc;
335
336	schedule_work(&video->pump);
337
338	return ret;
339}
340
341/*
342 * Initialize the UVC video stream.
343 */
344int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
345{
346	INIT_LIST_HEAD(&video->req_free);
347	spin_lock_init(&video->req_lock);
348	INIT_WORK(&video->pump, uvcg_video_pump);
349
350	video->uvc = uvc;
351	video->fcc = V4L2_PIX_FMT_YUYV;
352	video->bpp = 16;
353	video->width = 320;
354	video->height = 240;
355	video->imagesize = 320 * 240 * 2;
356
357	/* Initialize the video buffers queue. */
358	uvcg_queue_init(&video->queue, V4L2_BUF_TYPE_VIDEO_OUTPUT,
359			&video->mutex);
360	return 0;
361}
362
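
uvcg_video_init() above registers the gadget's buffer queue as a V4L2 video output device, so frames reach uvcg_video_pump() by being queued from userspace with the standard V4L2 output ioctls. The following is a minimal, hypothetical userspace sketch of that flow: the device node path, the buffer count, and the single grey test frame are assumptions, error handling is mostly omitted, and it is not part of the driver.

/* Minimal V4L2 OUTPUT producer sketch for the UVC gadget node (assumed path). */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int uvc_gadget_send_test_frame(const char *node)	/* e.g. "/dev/video0" (assumed) */
{
	struct v4l2_requestbuffers req = { .count = 4,
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT, .memory = V4L2_MEMORY_MMAP };
	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT };
	struct v4l2_buffer buf = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
		.memory = V4L2_MEMORY_MMAP, .index = 0 };
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	void *mem;
	int fd;

	fd = open(node, O_RDWR);
	if (fd < 0)
		return -1;

	/* Match the defaults set in uvcg_video_init(): YUYV 320x240. */
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	fmt.fmt.pix.width = 320;
	fmt.fmt.pix.height = 240;
	ioctl(fd, VIDIOC_S_FMT, &fmt);

	/* Allocate and map one of the driver-owned buffers. */
	ioctl(fd, VIDIOC_REQBUFS, &req);
	ioctl(fd, VIDIOC_QUERYBUF, &buf);
	mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, buf.m.offset);

	memset(mem, 0x80, fmt.fmt.pix.sizeimage);	/* one grey YUYV frame */
	buf.bytesused = fmt.fmt.pix.sizeimage;
	ioctl(fd, VIDIOC_QBUF, &buf);			/* feeds uvcg_video_pump() */
	ioctl(fd, VIDIOC_STREAMON, &type);

	/* A real application would loop on VIDIOC_DQBUF / VIDIOC_QBUF here. */
	munmap(mem, buf.length);
	close(fd);
	return 0;
}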