// SPDX-License-Identifier: GPL-2.0+
/*
 *	uvc_video.c  --  USB Video Class Gadget driver
 *
 *	Copyright (C) 2009-2010
 *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/video.h>
#include <asm/unaligned.h>

#include <media/v4l2-dev.h>

#include "uvc.h"
#include "uvc_queue.h"
#include "uvc_video.h"

/* --------------------------------------------------------------------------
 * Video codecs
 */

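/*
 * uvc_video_encode_header - Build the UVC payload header
 *
 * Write the payload header (bHeaderLength, bmHeaderInfo and the optional
 * PTS and SCR fields) at the start of @data and return its length in bytes.
 * The EOF bit is set when the remaining data of the current buffer fits in
 * this request.
 */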
static int
uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	struct timespec64 ts = ns_to_timespec64(buf->buf.vb2_buf.timestamp);
	int pos = 2;

	data[1] = UVC_STREAM_EOH | video->fid;

	if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
		data[1] |= UVC_STREAM_ERR;

	if (video->queue.buf_used == 0 && ts.tv_sec) {
		/* dwClockFrequency is 48 MHz */
		u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_PTS;
		put_unaligned_le32(pts, &data[pos]);
		pos += 4;
	}

	if (cdev->gadget->ops->get_frame) {
		u32 sof, stc;

		sof = usb_gadget_frame_number(cdev->gadget);
		ktime_get_ts64(&ts);
		stc = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_SCR;
		put_unaligned_le32(stc, &data[pos]);
		put_unaligned_le16(sof, &data[pos+4]);
		pos += 6;
	}

	data[0] = pos;

	if (buf->bytesused - video->queue.buf_used <= len - pos)
		data[1] |= UVC_STREAM_EOF;

	return pos;
}

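/*
 * uvc_video_encode_data - Copy video payload data into a USB request
 *
 * Copy up to @len bytes of the current video buffer, starting at the
 * queue's buf_used offset, into @data and return the number of bytes
 * copied.
 */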
static int
uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_video_queue *queue = &video->queue;
	unsigned int nbytes;
	void *mem;

	/* Copy video data to the USB buffer. */
	mem = buf->mem + queue->buf_used;
	nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);

	memcpy(data, mem, nbytes);
	queue->buf_used += nbytes;

	return nbytes;
}

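/*
 * uvc_video_encode_bulk - Fill a USB request for bulk transfers
 *
 * A bulk payload may span several USB requests, up to max_payload_size
 * bytes, and only the first request of a payload carries a header. When
 * the video buffer has been fully consumed it is marked done, removed
 * from the queue and the frame ID is toggled.
 */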
static void
uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	struct uvc_request *ureq = req->context;
	int len = video->req_size;
	int ret;

	/* Add a header at the beginning of the payload. */
	if (video->payload_size == 0) {
		ret = uvc_video_encode_header(video, buf, mem, len);
		video->payload_size += ret;
		mem += ret;
		len -= ret;
	}

	/* Process video data. */
	len = min((int)(video->max_payload_size - video->payload_size), len);
	ret = uvc_video_encode_data(video, buf, mem, len);

	video->payload_size += ret;
	len -= ret;

	req->length = video->req_size - len;
	req->zero = video->payload_size == video->max_payload_size;

	if (buf->bytesused == video->queue.buf_used) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;

		video->payload_size = 0;
	}

	if (video->payload_size == video->max_payload_size ||
	    video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE ||
	    buf->bytesused == video->queue.buf_used)
		video->payload_size = 0;
}

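/*
 * uvc_video_encode_isoc_sg - Fill an isochronous USB request using
 * scatter-gather
 *
 * Build the request's scatterlist: the first entry points to the
 * separately allocated payload header, the following entries reference
 * the video buffer pages directly so the data doesn't need to be copied.
 */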
static void
uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	unsigned int pending = buf->bytesused - video->queue.buf_used;
	struct uvc_request *ureq = req->context;
	struct scatterlist *sg, *iter;
	unsigned int len = video->req_size;
	unsigned int sg_left, part = 0;
	unsigned int i;
	int header_len;

	sg = ureq->sgt.sgl;
	sg_init_table(sg, ureq->sgt.nents);

	/* Init the header. */
	header_len = uvc_video_encode_header(video, buf, ureq->header,
				      video->req_size);
	sg_set_buf(sg, ureq->header, header_len);
	len -= header_len;

	if (pending <= len)
		len = pending;

	req->length = (len == pending) ?
		len + header_len : video->req_size;

	/* Init the pending sgs with payload */
	sg = sg_next(sg);

	for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
		if (!len || !buf->sg || !buf->sg->length)
			break;

		sg_left = buf->sg->length - buf->offset;
		part = min_t(unsigned int, len, sg_left);

		sg_set_page(iter, sg_page(buf->sg), part, buf->offset);

		if (part == sg_left) {
			buf->offset = 0;
			buf->sg = sg_next(buf->sg);
		} else {
			buf->offset += part;
		}
		len -= part;
	}

	/* Assign the video data with header. */
	req->buf = NULL;
	req->sg	= ureq->sgt.sgl;
	req->num_sgs = i + 1;

	req->length -= len;
	video->queue.buf_used += req->length - header_len;

	if (buf->bytesused == video->queue.buf_used || !buf->sg ||
			video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		buf->offset = 0;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}

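/*
 * uvc_video_encode_isoc - Fill an isochronous USB request
 *
 * Each request carries a payload header followed by as much video data as
 * fits in the remaining req_size bytes.
 */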
static void
uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	struct uvc_request *ureq = req->context;
	int len = video->req_size;
	int ret;

	/* Add the header. */
	ret = uvc_video_encode_header(video, buf, mem, len);
	mem += ret;
	len -= ret;

	/* Process video data. */
	ret = uvc_video_encode_data(video, buf, mem, len);
	len -= ret;

	req->length = video->req_size - len;

	if (buf->bytesused == video->queue.buf_used ||
			video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}

/* --------------------------------------------------------------------------
 * Request handling
 */

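/*
 * uvcg_video_ep_queue - Queue a request to the video endpoint
 *
 * On failure the error is logged and, for bulk endpoints, the endpoint is
 * halted. Isochronous endpoints cannot be halted.
 */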
static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
{
	int ret;

	ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
	if (ret < 0) {
		uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
			 ret);

		/* If the endpoint is disabled the descriptor may be NULL. */
		if (video->ep->desc) {
			/* Isochronous endpoints can't be halted. */
			if (usb_endpoint_xfer_bulk(video->ep->desc))
				usb_ep_set_halt(video->ep);
		}
	}

	return ret;
}

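/*
 * uvc_video_complete - USB request completion handler
 *
 * Runs in interrupt context. If streaming has already stopped (the device
 * is back in the connected state) the request is freed immediately.
 * Otherwise transfer errors are handled, the frame's last buffer (if any)
 * is returned to userspace, the request is moved back to the free list and
 * the pump worker is rescheduled while streaming.
 */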
static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct uvc_request *ureq = req->context;
	struct uvc_video *video = ureq->video;
	struct uvc_video_queue *queue = &video->queue;
	struct uvc_device *uvc = video->uvc;
	unsigned long flags;

	if (uvc->state == UVC_STATE_CONNECTED) {
		usb_ep_free_request(video->ep, ureq->req);
		ureq->req = NULL;
		return;
	}

	switch (req->status) {
	case 0:
		break;

	case -EXDEV:
		uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n");
		queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
		break;

	case -ESHUTDOWN:	/* disconnect from host. */
		uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
		uvcg_queue_cancel(queue, 1);
		break;

	default:
		uvcg_warn(&video->uvc->func,
			  "VS request completed with status %d.\n",
			  req->status);
		uvcg_queue_cancel(queue, 0);
	}

	if (ureq->last_buf) {
		uvcg_complete_buffer(&video->queue, ureq->last_buf);
		ureq->last_buf = NULL;
	}

	spin_lock_irqsave(&video->req_lock, flags);
	list_add_tail(&req->list, &video->req_free);
	spin_unlock_irqrestore(&video->req_lock, flags);

	if (uvc->state == UVC_STATE_STREAMING)
		queue_work(video->async_wq, &video->pump);
}

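/*
 * uvc_video_free_requests - Free the USB requests and their buffers
 *
 * Release every request's scatter-gather table, USB request and data
 * buffer, then reset the free list and the request size.
 */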
static int
uvc_video_free_requests(struct uvc_video *video)
{
	unsigned int i;

	if (video->ureq) {
		for (i = 0; i < video->uvc_num_requests; ++i) {
			sg_free_table(&video->ureq[i].sgt);

			if (video->ureq[i].req) {
				usb_ep_free_request(video->ep, video->ureq[i].req);
				video->ureq[i].req = NULL;
			}

			if (video->ureq[i].req_buffer) {
				kfree(video->ureq[i].req_buffer);
				video->ureq[i].req_buffer = NULL;
			}
		}

		kfree(video->ureq);
		video->ureq = NULL;
	}

	INIT_LIST_HEAD(&video->req_free);
	video->req_size = 0;
	return 0;
}

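/*
 * uvc_video_alloc_requests - Allocate the USB requests and their buffers
 *
 * The request size is derived from the endpoint's maxpacket, maxburst and
 * mult values. Each request gets a data buffer, a completion handler and a
 * scatter-gather table sized for the worst-case number of pages plus the
 * payload header.
 */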
static int
uvc_video_alloc_requests(struct uvc_video *video)
{
	unsigned int req_size;
	unsigned int i;
	int ret = -ENOMEM;

	BUG_ON(video->req_size);

	req_size = video->ep->maxpacket
		 * max_t(unsigned int, video->ep->maxburst, 1)
		 * (video->ep->mult);

	video->ureq = kcalloc(video->uvc_num_requests, sizeof(struct uvc_request), GFP_KERNEL);
	if (video->ureq == NULL)
		return -ENOMEM;

	for (i = 0; i < video->uvc_num_requests; ++i) {
		video->ureq[i].req_buffer = kmalloc(req_size, GFP_KERNEL);
		if (video->ureq[i].req_buffer == NULL)
			goto error;

		video->ureq[i].req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
		if (video->ureq[i].req == NULL)
			goto error;

		video->ureq[i].req->buf = video->ureq[i].req_buffer;
		video->ureq[i].req->length = 0;
		video->ureq[i].req->complete = uvc_video_complete;
		video->ureq[i].req->context = &video->ureq[i];
		video->ureq[i].video = video;
		video->ureq[i].last_buf = NULL;

		list_add_tail(&video->ureq[i].req->list, &video->req_free);
		/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
		sg_alloc_table(&video->ureq[i].sgt,
			       DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN,
					    PAGE_SIZE) + 2, GFP_KERNEL);
	}

	video->req_size = req_size;

	return 0;

error:
	uvc_video_free_requests(video);
	return ret;
}

/* --------------------------------------------------------------------------
 * Video streaming
 */

/*
 * uvcg_video_pump - Pump video data into the USB requests
 *
 * This function fills the available USB requests (listed in req_free) with
 * video data from the queued buffers.
 */
static void uvcg_video_pump(struct work_struct *work)
{
	struct uvc_video *video = container_of(work, struct uvc_video, pump);
	struct uvc_video_queue *queue = &video->queue;
	/* video->max_payload_size is only set when using bulk transfer */
	bool is_bulk = video->max_payload_size;
	struct usb_request *req = NULL;
	struct uvc_buffer *buf;
	unsigned long flags;
	bool buf_done;
	int ret;

	while (video->ep->enabled) {
		/*
		 * Retrieve the first available USB request, protected by the
		 * request lock.
		 */
		spin_lock_irqsave(&video->req_lock, flags);
		if (list_empty(&video->req_free)) {
			spin_unlock_irqrestore(&video->req_lock, flags);
			return;
		}
		req = list_first_entry(&video->req_free, struct usb_request,
					list);
		list_del(&req->list);
		spin_unlock_irqrestore(&video->req_lock, flags);

		/*
		 * Retrieve the first available video buffer and fill the
		 * request, protected by the video queue irqlock.
		 */
		spin_lock_irqsave(&queue->irqlock, flags);
		buf = uvcg_queue_head(queue);

		if (buf != NULL) {
			video->encode(req, video, buf);
			buf_done = buf->state == UVC_BUF_STATE_DONE;
		} else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) {
			/*
			 * No video buffer available; the queue is still connected and
			 * we're transferring over ISOC. Queue a 0 length request to
			 * prevent missed ISOC transfers.
			 */
			req->length = 0;
			buf_done = false;
		} else {
			/*
			 * Either the queue has been disconnected or no video buffer
			 * available for bulk transfer. Either way, stop processing
			 * further.
			 */
			spin_unlock_irqrestore(&queue->irqlock, flags);
			break;
		}

		/*
		 * With USB3 handling more requests at a higher speed, we can't
		 * afford to generate an interrupt for every request. Decide to
		 * interrupt:
		 *
		 * - When no more requests are available in the free queue, as
		 *   this may be our last chance to refill the endpoint's
		 *   request queue.
		 *
		 * - When this is the last request for the video
		 *   buffer, as we want to start sending the next video buffer
		 *   ASAP in case it doesn't get started already in the next
		 *   iteration of this loop.
		 *
		 * - Four times over the length of the requests queue (as
		 *   indicated by video->uvc_num_requests), as a trade-off
		 *   between latency and interrupt load.
		 */
		if (list_empty(&video->req_free) || buf_done ||
		    !(video->req_int_count %
		       DIV_ROUND_UP(video->uvc_num_requests, 4))) {
			video->req_int_count = 0;
			req->no_interrupt = 0;
		} else {
			req->no_interrupt = 1;
		}

		/* Queue the USB request */
		ret = uvcg_video_ep_queue(video, req);
		spin_unlock_irqrestore(&queue->irqlock, flags);

		if (ret < 0) {
			uvcg_queue_cancel(queue, 0);
			break;
		}

		/* Endpoint now owns the request */
		req = NULL;
		video->req_int_count++;
	}

	if (!req)
		return;

	spin_lock_irqsave(&video->req_lock, flags);
	list_add_tail(&req->list, &video->req_free);
	spin_unlock_irqrestore(&video->req_lock, flags);
	return;
}

/*
 * Enable or disable the video stream.
 */
int uvcg_video_enable(struct uvc_video *video, int enable)
{
	unsigned int i;
	int ret;

	if (video->ep == NULL) {
		uvcg_info(&video->uvc->func,
			  "Video enable failed, device is uninitialized.\n");
		return -ENODEV;
	}

	if (!enable) {
		cancel_work_sync(&video->pump);
		uvcg_queue_cancel(&video->queue, 0);

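		/* Dequeue all in-flight requests so they can be freed safely. */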
		for (i = 0; i < video->uvc_num_requests; ++i)
			if (video->ureq && video->ureq[i].req)
				usb_ep_dequeue(video->ep, video->ureq[i].req);

		uvc_video_free_requests(video);
		uvcg_queue_enable(&video->queue, 0);
		return 0;
	}

	if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
		return ret;

	if ((ret = uvc_video_alloc_requests(video)) < 0)
		return ret;

	if (video->max_payload_size) {
		video->encode = uvc_video_encode_bulk;
		video->payload_size = 0;
	} else
		video->encode = video->queue.use_sg ?
			uvc_video_encode_isoc_sg : uvc_video_encode_isoc;

	video->req_int_count = 0;

	queue_work(video->async_wq, &video->pump);

	return ret;
}

/*
 * Initialize the UVC video stream.
 */
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
	INIT_LIST_HEAD(&video->req_free);
	spin_lock_init(&video->req_lock);
	INIT_WORK(&video->pump, uvcg_video_pump);

	/* Allocate a work queue for the asynchronous video pump handler. */
	video->async_wq = alloc_workqueue("uvcgadget", WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!video->async_wq)
		return -EINVAL;

	video->uvc = uvc;
	video->fcc = V4L2_PIX_FMT_YUYV;
	video->bpp = 16;
	video->width = 320;
	video->height = 240;
	video->imagesize = 320 * 240 * 2;

	/* Initialize the video buffers queue. */
	uvcg_queue_init(&video->queue, uvc->v4l2_dev.dev->parent,
			V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
	return 0;
}