xref: /openbmc/linux/drivers/media/usb/uvc/uvc_queue.c (revision 9b9c2cd4)
/*
 *      uvc_queue.c  --  USB Video Class driver - Buffers management
 *
 *      Copyright (C) 2005-2010
 *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 *      This program is free software; you can redistribute it and/or modify
 *      it under the terms of the GNU General Public License as published by
 *      the Free Software Foundation; either version 2 of the License, or
 *      (at your option) any later version.
 *
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and sets up the
 * embedded videobuf2 queue; it fails only if vb2_queue_init() does.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2, and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed
 * by the driver.
 */

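/* Return the uvc_streaming instance that embeds the given queue. */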
static inline struct uvc_streaming *
uvc_queue_to_stream(struct uvc_video_queue *queue)
{
	return container_of(queue, struct uvc_streaming, queue);
}

/*
 * Return all queued buffers to videobuf2 in the requested state.
 *
 * This function must be called with the queue spinlock held.
 */
static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
			       enum uvc_buffer_state state)
{
	enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
					? VB2_BUF_STATE_ERROR
					: VB2_BUF_STATE_QUEUED;

	while (!list_empty(&queue->irqqueue)) {
		struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
							  struct uvc_buffer,
							  queue);
		list_del(&buf->queue);
		buf->state = state;
		vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
	}
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

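/*
 * Negotiate the number of buffers and plane sizes with videobuf2. A single
 * vmalloc'ed plane is used, sized to the requested image size or, when no
 * format is supplied, to the dwMaxVideoFrameSize value of the current
 * streaming control.
 */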
static int uvc_queue_setup(struct vb2_queue *vq, const void *parg,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], void *alloc_ctxs[])
{
	const struct v4l2_format *fmt = parg;
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);

	/* Make sure the image size is large enough. */
	if (fmt && fmt->fmt.pix.sizeimage < stream->ctrl.dwMaxVideoFrameSize)
		return -EINVAL;

	*nplanes = 1;

	sizes[0] = fmt ? fmt->fmt.pix.sizeimage
		 : stream->ctrl.dwMaxVideoFrameSize;

	return 0;
}

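/*
 * Validate a buffer before it is queued: reject output buffers whose payload
 * exceeds the plane size, refuse new buffers once the device has been
 * disconnected, and cache the plane address and length for later use when
 * video data is decoded.
 */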
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		return -EINVAL;
	}

	if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
		return -ENODEV;

	buf->state = UVC_BUF_STATE_QUEUED;
	buf->error = 0;
	buf->mem = vb2_plane_vaddr(vb, 0);
	buf->length = vb2_plane_size(vb, 0);
	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		buf->bytesused = 0;
	else
		buf->bytesused = vb2_get_plane_payload(vb, 0);

	return 0;
}

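/*
 * Queue a prepared buffer on the IRQ queue so that it can be filled (or
 * consumed) by the URB completion handler. The irqlock spinlock protects the
 * list against concurrent access from interrupt context.
 */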
static void uvc_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
		list_add_tail(&buf->queue, &queue->irqqueue);
	} else {
		/* If the device is disconnected, return the buffer to
		 * userspace directly. The next QBUF call will fail with
		 * -ENODEV.
		 */
		buf->state = UVC_BUF_STATE_ERROR;
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&queue->irqlock, flags);
}

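/*
 * Called by videobuf2 when a buffer is returned to userspace. Buffers that
 * completed successfully are passed to uvc_video_clock_update() so their
 * timestamps can be updated from the stream clock.
 */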
static void uvc_buffer_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);

	if (vb->state == VB2_BUF_STATE_DONE)
		uvc_video_clock_update(stream, vbuf, buf);
}

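/*
 * Start the video stream. If enabling the stream fails, return all buffers
 * on the IRQ queue to videobuf2 in the QUEUED state, as required when
 * start_streaming fails.
 */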
static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	unsigned long flags;
	int ret;

	queue->buf_used = 0;

	ret = uvc_video_enable(stream, 1);
	if (ret == 0)
		return 0;

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	return ret;
}

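/*
 * Stop the video stream and return all buffers still on the IRQ queue to
 * videobuf2 in the ERROR state.
 */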
static void uvc_stop_streaming(struct vb2_queue *vq)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	unsigned long flags;

	uvc_video_enable(stream, 0);

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

static struct vb2_ops uvc_queue_qops = {
	.queue_setup = uvc_queue_setup,
	.buf_prepare = uvc_buffer_prepare,
	.buf_queue = uvc_buffer_queue,
	.buf_finish = uvc_buffer_finish,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = uvc_start_streaming,
	.stop_streaming = uvc_stop_streaming,
};

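/*
 * Initialize the queue: set up the embedded videobuf2 queue as well as the
 * mutex, spinlock and buffer list used to hand buffers over to the URB
 * completion handler.
 */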
int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
		    int drop_corrupted)
{
	int ret;

	queue->queue.type = type;
	queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	queue->queue.drv_priv = queue;
	queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
	queue->queue.ops = &uvc_queue_qops;
	queue->queue.mem_ops = &vb2_vmalloc_memops;
	queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
		| V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
	queue->queue.lock = &queue->mutex;
	ret = vb2_queue_init(&queue->queue);
	if (ret)
		return ret;

	mutex_init(&queue->mutex);
	spin_lock_init(&queue->irqlock);
	INIT_LIST_HEAD(&queue->irqqueue);
	queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;

	return 0;
}

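/* Release the videobuf2 queue, freeing all allocated buffers. */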
void uvc_queue_release(struct uvc_video_queue *queue)
{
	mutex_lock(&queue->mutex);
	vb2_queue_release(&queue->queue);
	mutex_unlock(&queue->mutex);
}

/* -----------------------------------------------------------------------------
 * V4L2 queue operations
 */

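/*
 * The helpers below are called from the V4L2 ioctl and file operation
 * handlers. They delegate to the corresponding videobuf2 functions, taking
 * the queue mutex where serialization is required.
 */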
int uvc_request_buffers(struct uvc_video_queue *queue,
			struct v4l2_requestbuffers *rb)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_reqbufs(&queue->queue, rb);
	mutex_unlock(&queue->mutex);

	return ret ? ret : rb->count;
}

int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_querybuf(&queue->queue, buf);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_create_buffers(struct uvc_video_queue *queue,
		       struct v4l2_create_buffers *cb)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_create_bufs(&queue->queue, cb);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_qbuf(&queue->queue, buf);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_export_buffer(struct uvc_video_queue *queue,
		      struct v4l2_exportbuffer *exp)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_expbuf(&queue->queue, exp);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
		       int nonblocking)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamon(&queue->queue, type);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamoff(&queue->queue, type);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
	return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
		unsigned long pgoff)
{
	return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
			    poll_table *wait)
{
	unsigned int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_poll(&queue->queue, file, wait);
	mutex_unlock(&queue->mutex);

	return ret;
}

/* -----------------------------------------------------------------------------
 *
 */

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
	int allocated;

	mutex_lock(&queue->mutex);
	allocated = vb2_is_busy(&queue->queue);
	mutex_unlock(&queue->mutex);

	return allocated;
}

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
	/* This must be protected by the irqlock spinlock to avoid race
	 * conditions between uvc_buffer_queue and the disconnection event that
	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
	 * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
	 * state outside the queue code.
	 */
	if (disconnect)
		queue->flags |= UVC_QUEUE_DISCONNECTED;
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

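/*
 * Complete the current buffer and return the next buffer on the IRQ queue, or
 * NULL if the queue is empty. When UVC_QUEUE_DROP_CORRUPTED is set, corrupted
 * buffers are reset and requeued instead of being handed back to videobuf2.
 */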
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
		struct uvc_buffer *buf)
{
	struct uvc_buffer *nextbuf;
	unsigned long flags;

	if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
		buf->error = 0;
		buf->state = UVC_BUF_STATE_QUEUED;
		buf->bytesused = 0;
		vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
		return buf;
	}

	spin_lock_irqsave(&queue->irqlock, flags);
	list_del(&buf->queue);
	if (!list_empty(&queue->irqqueue))
		nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
					   queue);
	else
		nextbuf = NULL;
	spin_unlock_irqrestore(&queue->irqlock, flags);

	buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);

	return nextbuf;
}