// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2 and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */

static inline struct uvc_streaming *
uvc_queue_to_stream(struct uvc_video_queue *queue)
{
	return container_of(queue, struct uvc_streaming, queue);
}

static inline struct uvc_buffer *uvc_vbuf_to_buffer(struct vb2_v4l2_buffer *buf)
{
	return container_of(buf, struct uvc_buffer, buf);
}

/*
 * Return all queued buffers to videobuf2 in the requested state.
 *
 * This function must be called with the queue spinlock held.
 */
static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
				     enum uvc_buffer_state state)
{
	enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
					? VB2_BUF_STATE_ERROR
					: VB2_BUF_STATE_QUEUED;

	while (!list_empty(&queue->irqqueue)) {
		struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
							  struct uvc_buffer,
							  queue);
		list_del(&buf->queue);
		buf->state = state;
		vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
	}
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

static int uvc_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream;
	unsigned int size;

	switch (vq->type) {
	case V4L2_BUF_TYPE_META_CAPTURE:
		size = UVC_METADATA_BUF_SIZE;
		break;

	default:
		stream = uvc_queue_to_stream(queue);
		size = stream->ctrl.dwMaxVideoFrameSize;
		break;
	}

	/*
	 * When called with plane sizes, validate them. The driver supports
	 * single planar formats only, and requires buffers to be large enough
	 * to store a complete frame.
	 */
	if (*nplanes)
		return *nplanes != 1 || sizes[0] < size ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = size;
	return 0;
}
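/*
 * For illustration only (a sketch, not driver code): uvc_queue_setup() is
 * invoked by videobuf2 when userspace negotiates buffers, e.g. through
 * VIDIOC_REQBUFS. A minimal userspace view of that negotiation might look
 * like this; the device path is an assumption:
 *
 *	struct v4l2_requestbuffers req = {
 *		.count  = 4,
 *		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *	};
 *	int fd = open("/dev/video0", O_RDWR);
 *
 *	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
 *		perror("VIDIOC_REQBUFS");
 *	// req.count now holds the number of buffers actually allocated.
 */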
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		return -EINVAL;
	}

	if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
		return -ENODEV;

	buf->state = UVC_BUF_STATE_QUEUED;
	buf->error = 0;
	buf->mem = vb2_plane_vaddr(vb, 0);
	buf->length = vb2_plane_size(vb, 0);
	if (vb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		buf->bytesused = 0;
	else
		buf->bytesused = vb2_get_plane_payload(vb, 0);

	return 0;
}

static void uvc_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
		kref_init(&buf->ref);
		list_add_tail(&buf->queue, &queue->irqqueue);
	} else {
		/*
		 * If the device is disconnected, return the buffer to
		 * userspace directly. The next QBUF call will fail with
		 * -ENODEV.
		 */
		buf->state = UVC_BUF_STATE_ERROR;
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&queue->irqlock, flags);
}

static void uvc_buffer_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

	if (vb->state == VB2_BUF_STATE_DONE)
		uvc_video_clock_update(stream, vbuf, buf);
}

static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_streaming *stream = uvc_queue_to_stream(queue);
	int ret;

	lockdep_assert_irqs_enabled();

	queue->buf_used = 0;

	ret = uvc_video_start_streaming(stream);
	if (ret == 0)
		return 0;

	spin_lock_irq(&queue->irqlock);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
	spin_unlock_irq(&queue->irqlock);

	return ret;
}

static void uvc_stop_streaming(struct vb2_queue *vq)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);

	lockdep_assert_irqs_enabled();

	if (vq->type != V4L2_BUF_TYPE_META_CAPTURE)
		uvc_video_stop_streaming(uvc_queue_to_stream(queue));

	spin_lock_irq(&queue->irqlock);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
	spin_unlock_irq(&queue->irqlock);
}
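/*
 * For illustration only (a sketch, not driver code): uvc_start_streaming()
 * and uvc_stop_streaming() are reached from the VIDIOC_STREAMON and
 * VIDIOC_STREAMOFF ioctls via videobuf2. A minimal userspace sequence,
 * assuming buffers have already been requested and queued:
 *
 *	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *
 *	if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
 *		perror("VIDIOC_STREAMON");
 *	// ... capture frames with VIDIOC_DQBUF / VIDIOC_QBUF ...
 *	if (ioctl(fd, VIDIOC_STREAMOFF, &type) < 0)
 *		perror("VIDIOC_STREAMOFF");
 */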
static const struct vb2_ops uvc_queue_qops = {
	.queue_setup = uvc_queue_setup,
	.buf_prepare = uvc_buffer_prepare,
	.buf_queue = uvc_buffer_queue,
	.buf_finish = uvc_buffer_finish,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = uvc_start_streaming,
	.stop_streaming = uvc_stop_streaming,
};

static const struct vb2_ops uvc_meta_queue_qops = {
	.queue_setup = uvc_queue_setup,
	.buf_prepare = uvc_buffer_prepare,
	.buf_queue = uvc_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.stop_streaming = uvc_stop_streaming,
};

int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
		   int drop_corrupted)
{
	int ret;

	queue->queue.type = type;
	queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
	queue->queue.drv_priv = queue;
	queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
	queue->queue.mem_ops = &vb2_vmalloc_memops;
	queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
		| V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
	queue->queue.lock = &queue->mutex;

	switch (type) {
	case V4L2_BUF_TYPE_META_CAPTURE:
		queue->queue.ops = &uvc_meta_queue_qops;
		break;
	default:
		queue->queue.io_modes |= VB2_DMABUF;
		queue->queue.ops = &uvc_queue_qops;
		break;
	}

	ret = vb2_queue_init(&queue->queue);
	if (ret)
		return ret;

	mutex_init(&queue->mutex);
	spin_lock_init(&queue->irqlock);
	INIT_LIST_HEAD(&queue->irqqueue);
	queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;

	return 0;
}

void uvc_queue_release(struct uvc_video_queue *queue)
{
	mutex_lock(&queue->mutex);
	vb2_queue_release(&queue->queue);
	mutex_unlock(&queue->mutex);
}

/* -----------------------------------------------------------------------------
 * V4L2 queue operations
 */

int uvc_request_buffers(struct uvc_video_queue *queue,
			struct v4l2_requestbuffers *rb)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_reqbufs(&queue->queue, rb);
	mutex_unlock(&queue->mutex);

	return ret ? ret : rb->count;
}

int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_querybuf(&queue->queue, buf);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_create_buffers(struct uvc_video_queue *queue,
		       struct v4l2_create_buffers *cb)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_create_bufs(&queue->queue, cb);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_buffer(struct uvc_video_queue *queue,
		     struct media_device *mdev, struct v4l2_buffer *buf)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_qbuf(&queue->queue, mdev, buf);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_export_buffer(struct uvc_video_queue *queue,
		      struct v4l2_exportbuffer *exp)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_expbuf(&queue->queue, exp);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
		       int nonblocking)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamon(&queue->queue, type);
	mutex_unlock(&queue->mutex);

	return ret;
}

int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
	int ret;

	mutex_lock(&queue->mutex);
	ret = vb2_streamoff(&queue->queue, type);
	mutex_unlock(&queue->mutex);

	return ret;
}
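/*
 * For illustration only (a sketch, not driver code): uvc_queue_mmap() below
 * backs the mmap() path for MMAP buffers. A minimal userspace sequence that
 * maps one previously requested buffer:
 *
 *	struct v4l2_buffer buf = {
 *		.index  = 0,
 *		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *	};
 *	void *mem;
 *
 *	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
 *		perror("VIDIOC_QUERYBUF");
 *	mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, buf.m.offset);
 */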
int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
	return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
					  unsigned long pgoff)
{
	return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
			poll_table *wait)
{
	__poll_t ret;

	mutex_lock(&queue->mutex);
	ret = vb2_poll(&queue->queue, file, wait);
	mutex_unlock(&queue->mutex);

	return ret;
}

/* -----------------------------------------------------------------------------
 *
 */

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
	int allocated;

	mutex_lock(&queue->mutex);
	allocated = vb2_is_busy(&queue->queue);
	mutex_unlock(&queue->mutex);

	return allocated;
}

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
	/*
	 * This must be protected by the irqlock spinlock to avoid race
	 * conditions between uvc_buffer_queue and the disconnection event that
	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
	 * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
	 * state outside the queue code.
	 */
	if (disconnect)
		queue->flags |= UVC_QUEUE_DISCONNECTED;
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

/*
 * uvc_queue_get_current_buffer: Obtain the current working output buffer
 *
 * Buffers may span multiple packets and even URBs; the active buffer
 * therefore remains on the queue until the EOF marker.
 */
static struct uvc_buffer *
__uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
	if (list_empty(&queue->irqqueue))
		return NULL;

	return list_first_entry(&queue->irqqueue, struct uvc_buffer, queue);
}

struct uvc_buffer *uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
	struct uvc_buffer *nextbuf;
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	nextbuf = __uvc_queue_get_current_buffer(queue);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	return nextbuf;
}
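/*
 * For illustration only (a sketch, not actual driver code): the URB decode
 * path consumes the irqqueue roughly as follows, keeping the active buffer
 * at the head of the queue until a frame completes:
 *
 *	struct uvc_buffer *buf = uvc_queue_get_current_buffer(queue);
 *
 *	// copy payload data into buf->mem; then, on end of frame:
 *	buf = uvc_queue_next_buffer(queue, buf);
 *	// buf now points to the next queued buffer, or is NULL.
 */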
/*
 * uvc_queue_buffer_requeue: Requeue a buffer on our internal irqqueue
 *
 * Reuse a buffer through our internal queue without the need to 'prepare'.
 * The buffer will be returned to userspace through the uvc_buffer_queue call
 * if the device has been disconnected.
 */
static void uvc_queue_buffer_requeue(struct uvc_video_queue *queue,
				     struct uvc_buffer *buf)
{
	buf->error = 0;
	buf->state = UVC_BUF_STATE_QUEUED;
	buf->bytesused = 0;
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);

	uvc_buffer_queue(&buf->buf.vb2_buf);
}

static void uvc_queue_buffer_complete(struct kref *ref)
{
	struct uvc_buffer *buf = container_of(ref, struct uvc_buffer, ref);
	struct vb2_buffer *vb = &buf->buf.vb2_buf;
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);

	if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
		uvc_queue_buffer_requeue(queue, buf);
		return;
	}

	buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

/*
 * Release a reference on the buffer. Complete the buffer when the last
 * reference is released.
 */
void uvc_queue_buffer_release(struct uvc_buffer *buf)
{
	kref_put(&buf->ref, uvc_queue_buffer_complete);
}

/*
 * Remove this buffer from the queue. Lifetime will persist while async actions
 * are still running (if any), and uvc_queue_buffer_release will give the
 * buffer back to VB2 when all users have completed.
 */
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
					 struct uvc_buffer *buf)
{
	struct uvc_buffer *nextbuf;
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	list_del(&buf->queue);
	nextbuf = __uvc_queue_get_current_buffer(queue);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	uvc_queue_buffer_release(buf);

	return nextbuf;
}
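/*
 * For illustration only (a sketch, not actual driver code): buffer lifetime
 * follows the usual kref pattern. uvc_buffer_queue() calls kref_init() when a
 * buffer enters the irqqueue, asynchronous users (such as in-flight work on
 * URB data) would take extra references, and each user drops its reference
 * through uvc_queue_buffer_release():
 *
 *	kref_get(&buf->ref);            // async user starts using the buffer
 *	// ... asynchronous work completes ...
 *	uvc_queue_buffer_release(buf);  // last put runs uvc_queue_buffer_complete()
 */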