/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *     Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(), which performs basic
 * initialization of the uvc_video_queue struct and registers the videobuf2
 * queue; it only fails if vb2_queue_init() fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to
 * serialize calls into videobuf2, and a spinlock to protect the IRQ queue
 * that holds the buffers to be processed by the driver. The interplay
 * between the two locks is sketched in the comment following
 * uvc_queue_setup() below.
 */

static inline struct uvc_streaming *
uvc_queue_to_stream(struct uvc_video_queue *queue)
{
	return container_of(queue, struct uvc_streaming, queue);
}

/*
 * Return all queued buffers to videobuf2 in the requested state.
 *
 * This function must be called with the queue spinlock held.
 */
static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
				     enum uvc_buffer_state state)
{
	enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
					? VB2_BUF_STATE_ERROR
					: VB2_BUF_STATE_QUEUED;

	while (!list_empty(&queue->irqqueue)) {
		struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
							  struct uvc_buffer,
							  queue);
		list_del(&buf->queue);
		buf->state = state;
		vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
	}
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

static int uvc_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	unsigned int size = stream->ctrl.dwMaxVideoFrameSize;

	/* Make sure the image size is large enough. */
	if (*nplanes)
		return sizes[0] < size ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = size;

	return 0;
}
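
/*
 * Illustrative sketch (not driver code) of the buffer life cycle under the
 * two locks described at the top of this file: the userspace-facing helpers
 * take queue->mutex around every videobuf2 call, while the URB completion
 * path only takes queue->irqlock (through uvc_queue_next_buffer) to hand a
 * filled buffer back and fetch the next one. The handler name below is
 * hypothetical; the real decoding logic lives in uvc_video.c.
 *
 *	static void example_complete(struct uvc_streaming *stream,
 *				     struct uvc_buffer *buf)
 *	{
 *		// Copy the URB payload into buf->mem, update buf->bytesused,
 *		// then complete the buffer and pick the next one to fill.
 *		buf->state = UVC_BUF_STATE_READY;
 *		buf = uvc_queue_next_buffer(&stream->queue, buf);
 *	}
 */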

static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		return -EINVAL;
	}

	if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
		return -ENODEV;

	buf->state = UVC_BUF_STATE_QUEUED;
	buf->error = 0;
	buf->mem = vb2_plane_vaddr(vb, 0);
	buf->length = vb2_plane_size(vb, 0);
	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		buf->bytesused = 0;
	else
		buf->bytesused = vb2_get_plane_payload(vb, 0);

	return 0;
}

static void uvc_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
		list_add_tail(&buf->queue, &queue->irqqueue);
	} else {
		/* If the device is disconnected return the buffer to userspace
		 * directly. The next QBUF call will fail with -ENODEV.
		 */
		buf->state = UVC_BUF_STATE_ERROR;
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&queue->irqlock, flags);
}

static void uvc_buffer_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);

	if (vb->state == VB2_BUF_STATE_DONE)
		uvc_video_clock_update(stream, vbuf, buf);
}

static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	unsigned long flags;
	int ret;

	queue->buf_used = 0;

	ret = uvc_video_enable(stream, 1);
	if (ret == 0)
		return 0;

	/* The videobuf2 core requires that all queued buffers be returned in
	 * the QUEUED state when start_streaming fails.
	 */
	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	return ret;
}

static void uvc_stop_streaming(struct vb2_queue *vq)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	unsigned long flags;

	uvc_video_enable(stream, 0);

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

static const struct vb2_ops uvc_queue_qops = {
	.queue_setup = uvc_queue_setup,
	.buf_prepare = uvc_buffer_prepare,
	.buf_queue = uvc_buffer_queue,
	.buf_finish = uvc_buffer_finish,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = uvc_start_streaming,
	.stop_streaming = uvc_stop_streaming,
};

int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
		   int drop_corrupted)
{
	int ret;
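
/*
 * Illustrative usage sketch (not driver code): uvc_queue_init() is expected
 * to be called once per streaming interface, before the corresponding video
 * device is registered (the registration path lives in uvc_driver.c). The
 * error label below is hypothetical.
 *
 *	ret = uvc_queue_init(&stream->queue, stream->type, drop_corrupted);
 *	if (ret < 0)
 *		goto error;
 */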

	queue->queue.type = type;
	queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	queue->queue.drv_priv = queue;
	queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
	queue->queue.ops = &uvc_queue_qops;
	queue->queue.mem_ops = &vb2_vmalloc_memops;
	queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
				     | V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
	queue->queue.lock = &queue->mutex;
	ret = vb2_queue_init(&queue->queue);
	if (ret)
		return ret;

	mutex_init(&queue->mutex);
	spin_lock_init(&queue->irqlock);
	INIT_LIST_HEAD(&queue->irqqueue);
	queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;

	return 0;
}

void uvc_queue_release(struct uvc_video_queue *queue)
{
	mutex_lock(&queue->mutex);
	vb2_queue_release(&queue->queue);
	mutex_unlock(&queue->mutex);
}

/* -----------------------------------------------------------------------------
 * V4L2 queue operations
 */

int uvc_request_buffers(struct uvc_video_queue *queue,
			struct v4l2_requestbuffers *rb)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_reqbufs(&queue->queue, rb);
	mutex_unlock(&queue->mutex);

	return ret ? ret : rb->count;
}

int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_querybuf(&queue->queue, buf);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_create_buffers(struct uvc_video_queue *queue,
		       struct v4l2_create_buffers *cb)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_create_bufs(&queue->queue, cb);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_qbuf(&queue->queue, buf);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_export_buffer(struct uvc_video_queue *queue,
		      struct v4l2_exportbuffer *exp)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_expbuf(&queue->queue, exp);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
		       int nonblocking)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamon(&queue->queue, type);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamoff(&queue->queue, type);
	mutex_unlock(&queue->mutex);

	return ret;
}
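
/*
 * Illustrative sketch (not driver code) of how the wrappers above are meant
 * to be consumed by the V4L2 ioctl handlers (see uvc_v4l2.c): the handler
 * resolves the per-stream queue and delegates, relying on queue->mutex to
 * serialize the underlying videobuf2 call. The handler name and the way the
 * stream pointer is looked up are simplified assumptions.
 *
 *	static int example_ioctl_reqbufs(struct file *file, void *fh,
 *					 struct v4l2_requestbuffers *rb)
 *	{
 *		struct uvc_streaming *stream = video_drvdata(file);
 *		int ret;
 *
 *		ret = uvc_request_buffers(&stream->queue, rb);
 *		return ret < 0 ? ret : 0;
 *	}
 */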

int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
	return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
					  unsigned long pgoff)
{
	return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
			    poll_table *wait)
{
	unsigned int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_poll(&queue->queue, file, wait);
	mutex_unlock(&queue->mutex);

	return ret;
}

/* -----------------------------------------------------------------------------
 *
 */

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
	int allocated;

	mutex_lock(&queue->mutex);
	allocated = vb2_is_busy(&queue->queue);
	mutex_unlock(&queue->mutex);

	return allocated;
}

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
	/* This must be protected by the irqlock spinlock to avoid race
	 * conditions between uvc_buffer_queue and the disconnection event that
	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
	 * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
	 * state outside the queue code.
	 */
	if (disconnect)
		queue->flags |= UVC_QUEUE_DISCONNECTED;
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
					 struct uvc_buffer *buf)
{
	struct uvc_buffer *nextbuf;
	unsigned long flags;

	if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
		buf->error = 0;
		buf->state = UVC_BUF_STATE_QUEUED;
		buf->bytesused = 0;
		vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
		return buf;
	}

	spin_lock_irqsave(&queue->irqlock, flags);
	list_del(&buf->queue);
	if (!list_empty(&queue->irqqueue))
		nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
					   queue);
	else
		nextbuf = NULL;
	spin_unlock_irqrestore(&queue->irqlock, flags);

	buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);

	return nextbuf;
}