/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2 and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */

static inline struct uvc_streaming *
uvc_queue_to_stream(struct uvc_video_queue *queue)
{
	return container_of(queue, struct uvc_streaming, queue);
}

static inline struct uvc_buffer *uvc_vbuf_to_buffer(struct vb2_v4l2_buffer *buf)
{
	return container_of(buf, struct uvc_buffer, buf);
}

/*
 * Return all queued buffers to videobuf2 in the requested state.
 *
 * This function must be called with the queue spinlock held.
 */
static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
				     enum uvc_buffer_state state)
{
	enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
					? VB2_BUF_STATE_ERROR
					: VB2_BUF_STATE_QUEUED;

	while (!list_empty(&queue->irqqueue)) {
		struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
							  struct uvc_buffer,
							  queue);
		list_del(&buf->queue);
		buf->state = state;
		vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
	}
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

static int uvc_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream;
	unsigned int size;

	switch (vq->type) {
	case V4L2_BUF_TYPE_META_CAPTURE:
		size = UVC_METATADA_BUF_SIZE;
		break;

	default:
		stream = uvc_queue_to_stream(queue);
		size = stream->ctrl.dwMaxVideoFrameSize;
		break;
	}

	/*
	 * When called with plane sizes, validate them. The driver supports
	 * single planar formats only, and requires buffers to be large enough
	 * to store a complete frame.
	 */
	if (*nplanes)
		return *nplanes != 1 || sizes[0] < size ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = size;
	return 0;
}
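
/*
 * Illustrative note (not part of the original driver sources): when
 * videobuf2 calls uvc_queue_setup() with *nplanes already set, which is the
 * VIDIOC_CREATE_BUFS path, the requested layout is only validated. For
 * example, with a negotiated dwMaxVideoFrameSize of 614400 bytes a request
 * that resolves to a single plane of 614400 bytes or more is accepted, while
 * a smaller plane or a multi-planar layout is rejected with -EINVAL. When
 * *nplanes is zero, which is the VIDIOC_REQBUFS path, the driver picks the
 * single-plane layout and the minimum size itself.
 */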
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		return -EINVAL;
	}

	if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
		return -ENODEV;

	buf->state = UVC_BUF_STATE_QUEUED;
	buf->error = 0;
	buf->mem = vb2_plane_vaddr(vb, 0);
	buf->length = vb2_plane_size(vb, 0);
	if (vb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		buf->bytesused = 0;
	else
		buf->bytesused = vb2_get_plane_payload(vb, 0);

	return 0;
}

static void uvc_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
		list_add_tail(&buf->queue, &queue->irqqueue);
	} else {
		/* If the device is disconnected return the buffer to userspace
		 * directly. The next QBUF call will fail with -ENODEV.
		 */
		buf->state = UVC_BUF_STATE_ERROR;
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&queue->irqlock, flags);
}

static void uvc_buffer_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

	if (vb->state == VB2_BUF_STATE_DONE)
		uvc_video_clock_update(stream, vbuf, buf);
}

static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	unsigned long flags;
	int ret;

	queue->buf_used = 0;

	ret = uvc_video_enable(stream, 1);
	if (ret == 0)
		return 0;

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	return ret;
}

static void uvc_stop_streaming(struct vb2_queue *vq)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	unsigned long flags;

	if (vq->type != V4L2_BUF_TYPE_META_CAPTURE)
		uvc_video_enable(uvc_queue_to_stream(queue), 0);

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

static const struct vb2_ops uvc_queue_qops = {
	.queue_setup = uvc_queue_setup,
	.buf_prepare = uvc_buffer_prepare,
	.buf_queue = uvc_buffer_queue,
	.buf_finish = uvc_buffer_finish,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = uvc_start_streaming,
	.stop_streaming = uvc_stop_streaming,
};

static const struct vb2_ops uvc_meta_queue_qops = {
	.queue_setup = uvc_queue_setup,
	.buf_prepare = uvc_buffer_prepare,
	.buf_queue = uvc_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.stop_streaming = uvc_stop_streaming,
};
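
/*
 * Illustrative note (not part of the original driver sources): roughly,
 * videobuf2 invokes these operations in the following order.
 *
 *   queue_setup      - VIDIOC_REQBUFS / VIDIOC_CREATE_BUFS, to negotiate the
 *                      number of planes and their sizes.
 *   buf_prepare      - on VIDIOC_QBUF, to validate and cache per-buffer state.
 *   buf_queue        - when the buffer is handed to the driver, which adds it
 *                      to the IRQ queue under the spinlock.
 *   start_streaming  - on VIDIOC_STREAMON, once enough buffers are queued.
 *   buf_finish       - before VIDIOC_DQBUF returns a buffer to userspace.
 *   stop_streaming   - on VIDIOC_STREAMOFF or queue release; the driver must
 *                      return every buffer it still owns.
 *
 * wait_prepare and wait_finish drop and reacquire the queue lock around
 * blocking waits in VIDIOC_DQBUF.
 */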
int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
		   int drop_corrupted)
{
	int ret;

	queue->queue.type = type;
	queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
	queue->queue.drv_priv = queue;
	queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
	queue->queue.mem_ops = &vb2_vmalloc_memops;
	queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
		| V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
	queue->queue.lock = &queue->mutex;

	switch (type) {
	case V4L2_BUF_TYPE_META_CAPTURE:
		queue->queue.ops = &uvc_meta_queue_qops;
		break;
	default:
		queue->queue.io_modes |= VB2_DMABUF;
		queue->queue.ops = &uvc_queue_qops;
		break;
	}

	ret = vb2_queue_init(&queue->queue);
	if (ret)
		return ret;

	mutex_init(&queue->mutex);
	spin_lock_init(&queue->irqlock);
	INIT_LIST_HEAD(&queue->irqqueue);
	queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;

	return 0;
}

void uvc_queue_release(struct uvc_video_queue *queue)
{
	mutex_lock(&queue->mutex);
	vb2_queue_release(&queue->queue);
	mutex_unlock(&queue->mutex);
}

/* -----------------------------------------------------------------------------
 * V4L2 queue operations
 */

int uvc_request_buffers(struct uvc_video_queue *queue,
			struct v4l2_requestbuffers *rb)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_reqbufs(&queue->queue, rb);
	mutex_unlock(&queue->mutex);

	return ret ? ret : rb->count;
}

int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_querybuf(&queue->queue, buf);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_create_buffers(struct uvc_video_queue *queue,
		       struct v4l2_create_buffers *cb)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_create_bufs(&queue->queue, cb);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_buffer(struct uvc_video_queue *queue,
		     struct media_device *mdev, struct v4l2_buffer *buf)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_qbuf(&queue->queue, mdev, buf);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_export_buffer(struct uvc_video_queue *queue,
		      struct v4l2_exportbuffer *exp)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_expbuf(&queue->queue, exp);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
		       int nonblocking)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamon(&queue->queue, type);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamoff(&queue->queue, type);
	mutex_unlock(&queue->mutex);

	return ret;
}
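
/*
 * Illustrative note (not part of the original driver sources): these helpers
 * are thin, mutex-serialized wrappers around the matching videobuf2 calls,
 * so a V4L2 ioctl handler can simply forward to them. A minimal sketch, with
 * a hypothetical handler name, might look like:
 *
 *	static int uvc_ioctl_reqbufs_example(struct file *file, void *fh,
 *					     struct v4l2_requestbuffers *rb)
 *	{
 *		struct uvc_fh *handle = fh;
 *		struct uvc_streaming *stream = handle->stream;
 *		int ret;
 *
 *		ret = uvc_request_buffers(&stream->queue, rb);
 *
 *		return ret < 0 ? ret : 0;
 *	}
 *
 * Note that uvc_request_buffers() returns the allocated buffer count on
 * success, so a caller must translate that into the zero return value
 * expected by the V4L2 core.
 */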
int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
	return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
					  unsigned long pgoff)
{
	return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
			poll_table *wait)
{
	__poll_t ret;

	mutex_lock(&queue->mutex);
	ret = vb2_poll(&queue->queue, file, wait);
	mutex_unlock(&queue->mutex);

	return ret;
}

/* -----------------------------------------------------------------------------
 *
 */

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
	int allocated;

	mutex_lock(&queue->mutex);
	allocated = vb2_is_busy(&queue->queue);
	mutex_unlock(&queue->mutex);

	return allocated;
}

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
	/* This must be protected by the irqlock spinlock to avoid race
	 * conditions between uvc_buffer_queue and the disconnection event that
	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
	 * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
	 * state outside the queue code.
	 */
	if (disconnect)
		queue->flags |= UVC_QUEUE_DISCONNECTED;
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
					 struct uvc_buffer *buf)
{
	struct uvc_buffer *nextbuf;
	unsigned long flags;

	if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
		buf->error = 0;
		buf->state = UVC_BUF_STATE_QUEUED;
		buf->bytesused = 0;
		vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
		return buf;
	}

	spin_lock_irqsave(&queue->irqlock, flags);
	list_del(&buf->queue);
	if (!list_empty(&queue->irqqueue))
		nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
					   queue);
	else
		nextbuf = NULL;
	spin_unlock_irqrestore(&queue->irqlock, flags);

	buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);

	return nextbuf;
}
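
/*
 * Illustrative note (not part of the original driver sources): the URB
 * completion code keeps a pointer to the buffer at the head of the IRQ queue
 * and, once a frame is complete, hands it back with something like
 *
 *	buf = uvc_queue_next_buffer(&stream->queue, buf);
 *
 * which returns the finished buffer to videobuf2 (or requeues it when
 * UVC_QUEUE_DROP_CORRUPTED is set and the frame was erroneous) and yields the
 * next buffer to fill, or NULL when the IRQ queue is empty.
 */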