1 /* 2 * videobuf2-core.c - video buffer 2 core framework 3 * 4 * Copyright (C) 2010 Samsung Electronics 5 * 6 * Author: Pawel Osciak <pawel@osciak.com> 7 * Marek Szyprowski <m.szyprowski@samsung.com> 8 * 9 * The vb2_thread implementation was based on code from videobuf-dvb.c: 10 * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs] 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation. 15 */ 16 17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 18 19 #include <linux/err.h> 20 #include <linux/kernel.h> 21 #include <linux/module.h> 22 #include <linux/mm.h> 23 #include <linux/poll.h> 24 #include <linux/slab.h> 25 #include <linux/sched.h> 26 #include <linux/freezer.h> 27 #include <linux/kthread.h> 28 29 #include <media/videobuf2-core.h> 30 #include <media/v4l2-mc.h> 31 32 #include <trace/events/vb2.h> 33 34 static int debug; 35 module_param(debug, int, 0644); 36 37 #define dprintk(q, level, fmt, arg...) \ 38 do { \ 39 if (debug >= level) \ 40 pr_info("[%s] %s: " fmt, (q)->name, __func__, \ 41 ## arg); \ 42 } while (0) 43 44 #ifdef CONFIG_VIDEO_ADV_DEBUG 45 46 /* 47 * If advanced debugging is on, then count how often each op is called 48 * successfully, which can either be per-buffer or per-queue. 49 * 50 * This makes it easy to check that the 'init' and 'cleanup' 51 * (and variations thereof) stay balanced. 52 */ 53 54 #define log_memop(vb, op) \ 55 dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n", \ 56 (vb)->index, #op, \ 57 (vb)->vb2_queue->mem_ops->op ? "" : " (nop)") 58 59 #define call_memop(vb, op, args...) \ 60 ({ \ 61 struct vb2_queue *_q = (vb)->vb2_queue; \ 62 int err; \ 63 \ 64 log_memop(vb, op); \ 65 err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \ 66 if (!err) \ 67 (vb)->cnt_mem_ ## op++; \ 68 err; \ 69 }) 70 71 #define call_ptr_memop(vb, op, args...) \ 72 ({ \ 73 struct vb2_queue *_q = (vb)->vb2_queue; \ 74 void *ptr; \ 75 \ 76 log_memop(vb, op); \ 77 ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \ 78 if (!IS_ERR_OR_NULL(ptr)) \ 79 (vb)->cnt_mem_ ## op++; \ 80 ptr; \ 81 }) 82 83 #define call_void_memop(vb, op, args...) \ 84 ({ \ 85 struct vb2_queue *_q = (vb)->vb2_queue; \ 86 \ 87 log_memop(vb, op); \ 88 if (_q->mem_ops->op) \ 89 _q->mem_ops->op(args); \ 90 (vb)->cnt_mem_ ## op++; \ 91 }) 92 93 #define log_qop(q, op) \ 94 dprintk(q, 2, "call_qop(%s)%s\n", #op, \ 95 (q)->ops->op ? "" : " (nop)") 96 97 #define call_qop(q, op, args...) \ 98 ({ \ 99 int err; \ 100 \ 101 log_qop(q, op); \ 102 err = (q)->ops->op ? (q)->ops->op(args) : 0; \ 103 if (!err) \ 104 (q)->cnt_ ## op++; \ 105 err; \ 106 }) 107 108 #define call_void_qop(q, op, args...) \ 109 ({ \ 110 log_qop(q, op); \ 111 if ((q)->ops->op) \ 112 (q)->ops->op(args); \ 113 (q)->cnt_ ## op++; \ 114 }) 115 116 #define log_vb_qop(vb, op, args...) \ 117 dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n", \ 118 (vb)->index, #op, \ 119 (vb)->vb2_queue->ops->op ? "" : " (nop)") 120 121 #define call_vb_qop(vb, op, args...) \ 122 ({ \ 123 int err; \ 124 \ 125 log_vb_qop(vb, op); \ 126 err = (vb)->vb2_queue->ops->op ? \ 127 (vb)->vb2_queue->ops->op(args) : 0; \ 128 if (!err) \ 129 (vb)->cnt_ ## op++; \ 130 err; \ 131 }) 132 133 #define call_void_vb_qop(vb, op, args...) \ 134 ({ \ 135 log_vb_qop(vb, op); \ 136 if ((vb)->vb2_queue->ops->op) \ 137 (vb)->vb2_queue->ops->op(args); \ 138 (vb)->cnt_ ## op++; \ 139 }) 140 141 #else 142 143 #define call_memop(vb, op, args...) \ 144 ((vb)->vb2_queue->mem_ops->op ? \ 145 (vb)->vb2_queue->mem_ops->op(args) : 0) 146 147 #define call_ptr_memop(vb, op, args...) \ 148 ((vb)->vb2_queue->mem_ops->op ? \ 149 (vb)->vb2_queue->mem_ops->op(args) : NULL) 150 151 #define call_void_memop(vb, op, args...) \ 152 do { \ 153 if ((vb)->vb2_queue->mem_ops->op) \ 154 (vb)->vb2_queue->mem_ops->op(args); \ 155 } while (0) 156 157 #define call_qop(q, op, args...) \ 158 ((q)->ops->op ? (q)->ops->op(args) : 0) 159 160 #define call_void_qop(q, op, args...) \ 161 do { \ 162 if ((q)->ops->op) \ 163 (q)->ops->op(args); \ 164 } while (0) 165 166 #define call_vb_qop(vb, op, args...) \ 167 ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0) 168 169 #define call_void_vb_qop(vb, op, args...) \ 170 do { \ 171 if ((vb)->vb2_queue->ops->op) \ 172 (vb)->vb2_queue->ops->op(args); \ 173 } while (0) 174 175 #endif 176 177 #define call_bufop(q, op, args...) \ 178 ({ \ 179 int ret = 0; \ 180 if (q && q->buf_ops && q->buf_ops->op) \ 181 ret = q->buf_ops->op(args); \ 182 ret; \ 183 }) 184 185 #define call_void_bufop(q, op, args...) \ 186 ({ \ 187 if (q && q->buf_ops && q->buf_ops->op) \ 188 q->buf_ops->op(args); \ 189 }) 190 191 static void __vb2_queue_cancel(struct vb2_queue *q); 192 static void __enqueue_in_driver(struct vb2_buffer *vb); 193 194 /* 195 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer 196 */ 197 static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) 198 { 199 struct vb2_queue *q = vb->vb2_queue; 200 void *mem_priv; 201 int plane; 202 int ret = -ENOMEM; 203 204 /* 205 * Allocate memory for all planes in this buffer 206 * NOTE: mmapped areas should be page aligned 207 */ 208 for (plane = 0; plane < vb->num_planes; ++plane) { 209 /* Memops alloc requires size to be page aligned. */ 210 unsigned long size = PAGE_ALIGN(vb->planes[plane].length); 211 212 /* Did it wrap around? */ 213 if (size < vb->planes[plane].length) 214 goto free; 215 216 mem_priv = call_ptr_memop(vb, alloc, 217 q->alloc_devs[plane] ? : q->dev, 218 q->dma_attrs, size, q->dma_dir, q->gfp_flags); 219 if (IS_ERR_OR_NULL(mem_priv)) { 220 if (mem_priv) 221 ret = PTR_ERR(mem_priv); 222 goto free; 223 } 224 225 /* Associate allocator private data with this plane */ 226 vb->planes[plane].mem_priv = mem_priv; 227 } 228 229 return 0; 230 free: 231 /* Free already allocated memory if one of the allocations failed */ 232 for (; plane > 0; --plane) { 233 call_void_memop(vb, put, vb->planes[plane - 1].mem_priv); 234 vb->planes[plane - 1].mem_priv = NULL; 235 } 236 237 return ret; 238 } 239 240 /* 241 * __vb2_buf_mem_free() - free memory of the given buffer 242 */ 243 static void __vb2_buf_mem_free(struct vb2_buffer *vb) 244 { 245 unsigned int plane; 246 247 for (plane = 0; plane < vb->num_planes; ++plane) { 248 call_void_memop(vb, put, vb->planes[plane].mem_priv); 249 vb->planes[plane].mem_priv = NULL; 250 dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n", 251 plane, vb->index); 252 } 253 } 254 255 /* 256 * __vb2_buf_userptr_put() - release userspace memory associated with 257 * a USERPTR buffer 258 */ 259 static void __vb2_buf_userptr_put(struct vb2_buffer *vb) 260 { 261 unsigned int plane; 262 263 for (plane = 0; plane < vb->num_planes; ++plane) { 264 if (vb->planes[plane].mem_priv) 265 call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); 266 vb->planes[plane].mem_priv = NULL; 267 } 268 } 269 270 /* 271 * __vb2_plane_dmabuf_put() - release memory associated with 272 * a DMABUF shared plane 273 */ 274 static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p) 275 { 276 if (!p->mem_priv) 277 return; 278 279 if (p->dbuf_mapped) 280 call_void_memop(vb, unmap_dmabuf, p->mem_priv); 281 282 call_void_memop(vb, detach_dmabuf, p->mem_priv); 283 dma_buf_put(p->dbuf); 284 p->mem_priv = NULL; 285 p->dbuf = NULL; 286 p->dbuf_mapped = 0; 287 } 288 289 /* 290 * __vb2_buf_dmabuf_put() - release memory associated with 291 * a DMABUF shared buffer 292 */ 293 static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) 294 { 295 unsigned int plane; 296 297 for (plane = 0; plane < vb->num_planes; ++plane) 298 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); 299 } 300 301 /* 302 * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory 303 * to sync caches 304 */ 305 static void __vb2_buf_mem_prepare(struct vb2_buffer *vb) 306 { 307 unsigned int plane; 308 309 if (vb->synced) 310 return; 311 312 if (vb->need_cache_sync_on_prepare) { 313 for (plane = 0; plane < vb->num_planes; ++plane) 314 call_void_memop(vb, prepare, 315 vb->planes[plane].mem_priv); 316 } 317 vb->synced = 1; 318 } 319 320 /* 321 * __vb2_buf_mem_finish() - call ->finish on buffer's private memory 322 * to sync caches 323 */ 324 static void __vb2_buf_mem_finish(struct vb2_buffer *vb) 325 { 326 unsigned int plane; 327 328 if (!vb->synced) 329 return; 330 331 if (vb->need_cache_sync_on_finish) { 332 for (plane = 0; plane < vb->num_planes; ++plane) 333 call_void_memop(vb, finish, 334 vb->planes[plane].mem_priv); 335 } 336 vb->synced = 0; 337 } 338 339 /* 340 * __setup_offsets() - setup unique offsets ("cookies") for every plane in 341 * the buffer. 342 */ 343 static void __setup_offsets(struct vb2_buffer *vb) 344 { 345 struct vb2_queue *q = vb->vb2_queue; 346 unsigned int plane; 347 unsigned long off = 0; 348 349 if (vb->index) { 350 struct vb2_buffer *prev = q->bufs[vb->index - 1]; 351 struct vb2_plane *p = &prev->planes[prev->num_planes - 1]; 352 353 off = PAGE_ALIGN(p->m.offset + p->length); 354 } 355 356 for (plane = 0; plane < vb->num_planes; ++plane) { 357 vb->planes[plane].m.offset = off; 358 359 dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n", 360 vb->index, plane, off); 361 362 off += vb->planes[plane].length; 363 off = PAGE_ALIGN(off); 364 } 365 } 366 367 /* 368 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type) 369 * video buffer memory for all buffers/planes on the queue and initializes the 370 * queue 371 * 372 * Returns the number of buffers successfully allocated. 373 */ 374 static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, 375 unsigned int num_buffers, unsigned int num_planes, 376 const unsigned plane_sizes[VB2_MAX_PLANES]) 377 { 378 unsigned int buffer, plane; 379 struct vb2_buffer *vb; 380 int ret; 381 382 /* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */ 383 num_buffers = min_t(unsigned int, num_buffers, 384 VB2_MAX_FRAME - q->num_buffers); 385 386 for (buffer = 0; buffer < num_buffers; ++buffer) { 387 /* Allocate videobuf buffer structures */ 388 vb = kzalloc(q->buf_struct_size, GFP_KERNEL); 389 if (!vb) { 390 dprintk(q, 1, "memory alloc for buffer struct failed\n"); 391 break; 392 } 393 394 vb->state = VB2_BUF_STATE_DEQUEUED; 395 vb->vb2_queue = q; 396 vb->num_planes = num_planes; 397 vb->index = q->num_buffers + buffer; 398 vb->type = q->type; 399 vb->memory = memory; 400 for (plane = 0; plane < num_planes; ++plane) { 401 vb->planes[plane].length = plane_sizes[plane]; 402 vb->planes[plane].min_length = plane_sizes[plane]; 403 } 404 call_void_bufop(q, init_buffer, vb); 405 406 q->bufs[vb->index] = vb; 407 408 /* Allocate video buffer memory for the MMAP type */ 409 if (memory == VB2_MEMORY_MMAP) { 410 ret = __vb2_buf_mem_alloc(vb); 411 if (ret) { 412 dprintk(q, 1, "failed allocating memory for buffer %d\n", 413 buffer); 414 q->bufs[vb->index] = NULL; 415 kfree(vb); 416 break; 417 } 418 __setup_offsets(vb); 419 /* 420 * Call the driver-provided buffer initialization 421 * callback, if given. An error in initialization 422 * results in queue setup failure. 423 */ 424 ret = call_vb_qop(vb, buf_init, vb); 425 if (ret) { 426 dprintk(q, 1, "buffer %d %p initialization failed\n", 427 buffer, vb); 428 __vb2_buf_mem_free(vb); 429 q->bufs[vb->index] = NULL; 430 kfree(vb); 431 break; 432 } 433 } 434 } 435 436 dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n", 437 buffer, num_planes); 438 439 return buffer; 440 } 441 442 /* 443 * __vb2_free_mem() - release all video buffer memory for a given queue 444 */ 445 static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers) 446 { 447 unsigned int buffer; 448 struct vb2_buffer *vb; 449 450 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; 451 ++buffer) { 452 vb = q->bufs[buffer]; 453 if (!vb) 454 continue; 455 456 /* Free MMAP buffers or release USERPTR buffers */ 457 if (q->memory == VB2_MEMORY_MMAP) 458 __vb2_buf_mem_free(vb); 459 else if (q->memory == VB2_MEMORY_DMABUF) 460 __vb2_buf_dmabuf_put(vb); 461 else 462 __vb2_buf_userptr_put(vb); 463 } 464 } 465 466 /* 467 * __vb2_queue_free() - free buffers at the end of the queue - video memory and 468 * related information, if no buffers are left return the queue to an 469 * uninitialized state. Might be called even if the queue has already been freed. 470 */ 471 static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) 472 { 473 unsigned int buffer; 474 475 /* 476 * Sanity check: when preparing a buffer the queue lock is released for 477 * a short while (see __buf_prepare for the details), which would allow 478 * a race with a reqbufs which can call this function. Removing the 479 * buffers from underneath __buf_prepare is obviously a bad idea, so we 480 * check if any of the buffers is in the state PREPARING, and if so we 481 * just return -EAGAIN. 482 */ 483 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; 484 ++buffer) { 485 if (q->bufs[buffer] == NULL) 486 continue; 487 if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) { 488 dprintk(q, 1, "preparing buffers, cannot free\n"); 489 return -EAGAIN; 490 } 491 } 492 493 /* Call driver-provided cleanup function for each buffer, if provided */ 494 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; 495 ++buffer) { 496 struct vb2_buffer *vb = q->bufs[buffer]; 497 498 if (vb && vb->planes[0].mem_priv) 499 call_void_vb_qop(vb, buf_cleanup, vb); 500 } 501 502 /* Release video buffer memory */ 503 __vb2_free_mem(q, buffers); 504 505 #ifdef CONFIG_VIDEO_ADV_DEBUG 506 /* 507 * Check that all the calls were balances during the life-time of this 508 * queue. If not (or if the debug level is 1 or up), then dump the 509 * counters to the kernel log. 510 */ 511 if (q->num_buffers) { 512 bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming || 513 q->cnt_wait_prepare != q->cnt_wait_finish; 514 515 if (unbalanced || debug) { 516 pr_info("counters for queue %p:%s\n", q, 517 unbalanced ? " UNBALANCED!" : ""); 518 pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n", 519 q->cnt_queue_setup, q->cnt_start_streaming, 520 q->cnt_stop_streaming); 521 pr_info(" wait_prepare: %u wait_finish: %u\n", 522 q->cnt_wait_prepare, q->cnt_wait_finish); 523 } 524 q->cnt_queue_setup = 0; 525 q->cnt_wait_prepare = 0; 526 q->cnt_wait_finish = 0; 527 q->cnt_start_streaming = 0; 528 q->cnt_stop_streaming = 0; 529 } 530 for (buffer = 0; buffer < q->num_buffers; ++buffer) { 531 struct vb2_buffer *vb = q->bufs[buffer]; 532 bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put || 533 vb->cnt_mem_prepare != vb->cnt_mem_finish || 534 vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr || 535 vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf || 536 vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf || 537 vb->cnt_buf_queue != vb->cnt_buf_done || 538 vb->cnt_buf_prepare != vb->cnt_buf_finish || 539 vb->cnt_buf_init != vb->cnt_buf_cleanup; 540 541 if (unbalanced || debug) { 542 pr_info(" counters for queue %p, buffer %d:%s\n", 543 q, buffer, unbalanced ? " UNBALANCED!" : ""); 544 pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n", 545 vb->cnt_buf_init, vb->cnt_buf_cleanup, 546 vb->cnt_buf_prepare, vb->cnt_buf_finish); 547 pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n", 548 vb->cnt_buf_out_validate, vb->cnt_buf_queue, 549 vb->cnt_buf_done, vb->cnt_buf_request_complete); 550 pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n", 551 vb->cnt_mem_alloc, vb->cnt_mem_put, 552 vb->cnt_mem_prepare, vb->cnt_mem_finish, 553 vb->cnt_mem_mmap); 554 pr_info(" get_userptr: %u put_userptr: %u\n", 555 vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr); 556 pr_info(" attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n", 557 vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf, 558 vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf); 559 pr_info(" get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n", 560 vb->cnt_mem_get_dmabuf, 561 vb->cnt_mem_num_users, 562 vb->cnt_mem_vaddr, 563 vb->cnt_mem_cookie); 564 } 565 } 566 #endif 567 568 /* Free videobuf buffers */ 569 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; 570 ++buffer) { 571 kfree(q->bufs[buffer]); 572 q->bufs[buffer] = NULL; 573 } 574 575 q->num_buffers -= buffers; 576 if (!q->num_buffers) { 577 q->memory = VB2_MEMORY_UNKNOWN; 578 INIT_LIST_HEAD(&q->queued_list); 579 } 580 return 0; 581 } 582 583 bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb) 584 { 585 unsigned int plane; 586 for (plane = 0; plane < vb->num_planes; ++plane) { 587 void *mem_priv = vb->planes[plane].mem_priv; 588 /* 589 * If num_users() has not been provided, call_memop 590 * will return 0, apparently nobody cares about this 591 * case anyway. If num_users() returns more than 1, 592 * we are not the only user of the plane's memory. 593 */ 594 if (mem_priv && call_memop(vb, num_users, mem_priv) > 1) 595 return true; 596 } 597 return false; 598 } 599 EXPORT_SYMBOL(vb2_buffer_in_use); 600 601 /* 602 * __buffers_in_use() - return true if any buffers on the queue are in use and 603 * the queue cannot be freed (by the means of REQBUFS(0)) call 604 */ 605 static bool __buffers_in_use(struct vb2_queue *q) 606 { 607 unsigned int buffer; 608 for (buffer = 0; buffer < q->num_buffers; ++buffer) { 609 if (vb2_buffer_in_use(q, q->bufs[buffer])) 610 return true; 611 } 612 return false; 613 } 614 615 void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb) 616 { 617 call_void_bufop(q, fill_user_buffer, q->bufs[index], pb); 618 } 619 EXPORT_SYMBOL_GPL(vb2_core_querybuf); 620 621 /* 622 * __verify_userptr_ops() - verify that all memory operations required for 623 * USERPTR queue type have been provided 624 */ 625 static int __verify_userptr_ops(struct vb2_queue *q) 626 { 627 if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr || 628 !q->mem_ops->put_userptr) 629 return -EINVAL; 630 631 return 0; 632 } 633 634 /* 635 * __verify_mmap_ops() - verify that all memory operations required for 636 * MMAP queue type have been provided 637 */ 638 static int __verify_mmap_ops(struct vb2_queue *q) 639 { 640 if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc || 641 !q->mem_ops->put || !q->mem_ops->mmap) 642 return -EINVAL; 643 644 return 0; 645 } 646 647 /* 648 * __verify_dmabuf_ops() - verify that all memory operations required for 649 * DMABUF queue type have been provided 650 */ 651 static int __verify_dmabuf_ops(struct vb2_queue *q) 652 { 653 if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf || 654 !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf || 655 !q->mem_ops->unmap_dmabuf) 656 return -EINVAL; 657 658 return 0; 659 } 660 661 int vb2_verify_memory_type(struct vb2_queue *q, 662 enum vb2_memory memory, unsigned int type) 663 { 664 if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR && 665 memory != VB2_MEMORY_DMABUF) { 666 dprintk(q, 1, "unsupported memory type\n"); 667 return -EINVAL; 668 } 669 670 if (type != q->type) { 671 dprintk(q, 1, "requested type is incorrect\n"); 672 return -EINVAL; 673 } 674 675 /* 676 * Make sure all the required memory ops for given memory type 677 * are available. 678 */ 679 if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) { 680 dprintk(q, 1, "MMAP for current setup unsupported\n"); 681 return -EINVAL; 682 } 683 684 if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) { 685 dprintk(q, 1, "USERPTR for current setup unsupported\n"); 686 return -EINVAL; 687 } 688 689 if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { 690 dprintk(q, 1, "DMABUF for current setup unsupported\n"); 691 return -EINVAL; 692 } 693 694 /* 695 * Place the busy tests at the end: -EBUSY can be ignored when 696 * create_bufs is called with count == 0, but count == 0 should still 697 * do the memory and type validation. 698 */ 699 if (vb2_fileio_is_active(q)) { 700 dprintk(q, 1, "file io in progress\n"); 701 return -EBUSY; 702 } 703 return 0; 704 } 705 EXPORT_SYMBOL(vb2_verify_memory_type); 706 707 static void set_queue_consistency(struct vb2_queue *q, bool consistent_mem) 708 { 709 q->dma_attrs &= ~DMA_ATTR_NON_CONSISTENT; 710 711 if (!vb2_queue_allows_cache_hints(q)) 712 return; 713 if (!consistent_mem) 714 q->dma_attrs |= DMA_ATTR_NON_CONSISTENT; 715 } 716 717 static bool verify_consistency_attr(struct vb2_queue *q, bool consistent_mem) 718 { 719 bool queue_is_consistent = !(q->dma_attrs & DMA_ATTR_NON_CONSISTENT); 720 721 if (consistent_mem != queue_is_consistent) { 722 dprintk(q, 1, "memory consistency model mismatch\n"); 723 return false; 724 } 725 return true; 726 } 727 728 int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, 729 unsigned int flags, unsigned int *count) 730 { 731 unsigned int num_buffers, allocated_buffers, num_planes = 0; 732 unsigned plane_sizes[VB2_MAX_PLANES] = { }; 733 bool consistent_mem = true; 734 unsigned int i; 735 int ret; 736 737 if (flags & V4L2_FLAG_MEMORY_NON_CONSISTENT) 738 consistent_mem = false; 739 740 if (q->streaming) { 741 dprintk(q, 1, "streaming active\n"); 742 return -EBUSY; 743 } 744 745 if (q->waiting_in_dqbuf && *count) { 746 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); 747 return -EBUSY; 748 } 749 750 if (*count == 0 || q->num_buffers != 0 || 751 (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) || 752 !verify_consistency_attr(q, consistent_mem)) { 753 /* 754 * We already have buffers allocated, so first check if they 755 * are not in use and can be freed. 756 */ 757 mutex_lock(&q->mmap_lock); 758 if (debug && q->memory == VB2_MEMORY_MMAP && 759 __buffers_in_use(q)) 760 dprintk(q, 1, "memory in use, orphaning buffers\n"); 761 762 /* 763 * Call queue_cancel to clean up any buffers in the 764 * QUEUED state which is possible if buffers were prepared or 765 * queued without ever calling STREAMON. 766 */ 767 __vb2_queue_cancel(q); 768 ret = __vb2_queue_free(q, q->num_buffers); 769 mutex_unlock(&q->mmap_lock); 770 if (ret) 771 return ret; 772 773 /* 774 * In case of REQBUFS(0) return immediately without calling 775 * driver's queue_setup() callback and allocating resources. 776 */ 777 if (*count == 0) 778 return 0; 779 } 780 781 /* 782 * Make sure the requested values and current defaults are sane. 783 */ 784 WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME); 785 num_buffers = max_t(unsigned int, *count, q->min_buffers_needed); 786 num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME); 787 memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); 788 q->memory = memory; 789 set_queue_consistency(q, consistent_mem); 790 791 /* 792 * Ask the driver how many buffers and planes per buffer it requires. 793 * Driver also sets the size and allocator context for each plane. 794 */ 795 ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, 796 plane_sizes, q->alloc_devs); 797 if (ret) 798 return ret; 799 800 /* Check that driver has set sane values */ 801 if (WARN_ON(!num_planes)) 802 return -EINVAL; 803 804 for (i = 0; i < num_planes; i++) 805 if (WARN_ON(!plane_sizes[i])) 806 return -EINVAL; 807 808 /* Finally, allocate buffers and video memory */ 809 allocated_buffers = 810 __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes); 811 if (allocated_buffers == 0) { 812 dprintk(q, 1, "memory allocation failed\n"); 813 return -ENOMEM; 814 } 815 816 /* 817 * There is no point in continuing if we can't allocate the minimum 818 * number of buffers needed by this vb2_queue. 819 */ 820 if (allocated_buffers < q->min_buffers_needed) 821 ret = -ENOMEM; 822 823 /* 824 * Check if driver can handle the allocated number of buffers. 825 */ 826 if (!ret && allocated_buffers < num_buffers) { 827 num_buffers = allocated_buffers; 828 /* 829 * num_planes is set by the previous queue_setup(), but since it 830 * signals to queue_setup() whether it is called from create_bufs() 831 * vs reqbufs() we zero it here to signal that queue_setup() is 832 * called for the reqbufs() case. 833 */ 834 num_planes = 0; 835 836 ret = call_qop(q, queue_setup, q, &num_buffers, 837 &num_planes, plane_sizes, q->alloc_devs); 838 839 if (!ret && allocated_buffers < num_buffers) 840 ret = -ENOMEM; 841 842 /* 843 * Either the driver has accepted a smaller number of buffers, 844 * or .queue_setup() returned an error 845 */ 846 } 847 848 mutex_lock(&q->mmap_lock); 849 q->num_buffers = allocated_buffers; 850 851 if (ret < 0) { 852 /* 853 * Note: __vb2_queue_free() will subtract 'allocated_buffers' 854 * from q->num_buffers. 855 */ 856 __vb2_queue_free(q, allocated_buffers); 857 mutex_unlock(&q->mmap_lock); 858 return ret; 859 } 860 mutex_unlock(&q->mmap_lock); 861 862 /* 863 * Return the number of successfully allocated buffers 864 * to the userspace. 865 */ 866 *count = allocated_buffers; 867 q->waiting_for_buffers = !q->is_output; 868 869 return 0; 870 } 871 EXPORT_SYMBOL_GPL(vb2_core_reqbufs); 872 873 int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, 874 unsigned int flags, unsigned int *count, 875 unsigned int requested_planes, 876 const unsigned int requested_sizes[]) 877 { 878 unsigned int num_planes = 0, num_buffers, allocated_buffers; 879 unsigned plane_sizes[VB2_MAX_PLANES] = { }; 880 bool consistent_mem = true; 881 int ret; 882 883 if (flags & V4L2_FLAG_MEMORY_NON_CONSISTENT) 884 consistent_mem = false; 885 886 if (q->num_buffers == VB2_MAX_FRAME) { 887 dprintk(q, 1, "maximum number of buffers already allocated\n"); 888 return -ENOBUFS; 889 } 890 891 if (!q->num_buffers) { 892 if (q->waiting_in_dqbuf && *count) { 893 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); 894 return -EBUSY; 895 } 896 memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); 897 q->memory = memory; 898 set_queue_consistency(q, consistent_mem); 899 q->waiting_for_buffers = !q->is_output; 900 } else { 901 if (q->memory != memory) { 902 dprintk(q, 1, "memory model mismatch\n"); 903 return -EINVAL; 904 } 905 if (!verify_consistency_attr(q, consistent_mem)) 906 return -EINVAL; 907 } 908 909 num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers); 910 911 if (requested_planes && requested_sizes) { 912 num_planes = requested_planes; 913 memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes)); 914 } 915 916 /* 917 * Ask the driver, whether the requested number of buffers, planes per 918 * buffer and their sizes are acceptable 919 */ 920 ret = call_qop(q, queue_setup, q, &num_buffers, 921 &num_planes, plane_sizes, q->alloc_devs); 922 if (ret) 923 return ret; 924 925 /* Finally, allocate buffers and video memory */ 926 allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, 927 num_planes, plane_sizes); 928 if (allocated_buffers == 0) { 929 dprintk(q, 1, "memory allocation failed\n"); 930 return -ENOMEM; 931 } 932 933 /* 934 * Check if driver can handle the so far allocated number of buffers. 935 */ 936 if (allocated_buffers < num_buffers) { 937 num_buffers = allocated_buffers; 938 939 /* 940 * q->num_buffers contains the total number of buffers, that the 941 * queue driver has set up 942 */ 943 ret = call_qop(q, queue_setup, q, &num_buffers, 944 &num_planes, plane_sizes, q->alloc_devs); 945 946 if (!ret && allocated_buffers < num_buffers) 947 ret = -ENOMEM; 948 949 /* 950 * Either the driver has accepted a smaller number of buffers, 951 * or .queue_setup() returned an error 952 */ 953 } 954 955 mutex_lock(&q->mmap_lock); 956 q->num_buffers += allocated_buffers; 957 958 if (ret < 0) { 959 /* 960 * Note: __vb2_queue_free() will subtract 'allocated_buffers' 961 * from q->num_buffers. 962 */ 963 __vb2_queue_free(q, allocated_buffers); 964 mutex_unlock(&q->mmap_lock); 965 return -ENOMEM; 966 } 967 mutex_unlock(&q->mmap_lock); 968 969 /* 970 * Return the number of successfully allocated buffers 971 * to the userspace. 972 */ 973 *count = allocated_buffers; 974 975 return 0; 976 } 977 EXPORT_SYMBOL_GPL(vb2_core_create_bufs); 978 979 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) 980 { 981 if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) 982 return NULL; 983 984 return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv); 985 986 } 987 EXPORT_SYMBOL_GPL(vb2_plane_vaddr); 988 989 void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) 990 { 991 if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) 992 return NULL; 993 994 return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv); 995 } 996 EXPORT_SYMBOL_GPL(vb2_plane_cookie); 997 998 void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) 999 { 1000 struct vb2_queue *q = vb->vb2_queue; 1001 unsigned long flags; 1002 1003 if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE)) 1004 return; 1005 1006 if (WARN_ON(state != VB2_BUF_STATE_DONE && 1007 state != VB2_BUF_STATE_ERROR && 1008 state != VB2_BUF_STATE_QUEUED)) 1009 state = VB2_BUF_STATE_ERROR; 1010 1011 #ifdef CONFIG_VIDEO_ADV_DEBUG 1012 /* 1013 * Although this is not a callback, it still does have to balance 1014 * with the buf_queue op. So update this counter manually. 1015 */ 1016 vb->cnt_buf_done++; 1017 #endif 1018 dprintk(q, 4, "done processing on buffer %d, state: %d\n", 1019 vb->index, state); 1020 1021 if (state != VB2_BUF_STATE_QUEUED) 1022 __vb2_buf_mem_finish(vb); 1023 1024 spin_lock_irqsave(&q->done_lock, flags); 1025 if (state == VB2_BUF_STATE_QUEUED) { 1026 vb->state = VB2_BUF_STATE_QUEUED; 1027 } else { 1028 /* Add the buffer to the done buffers list */ 1029 list_add_tail(&vb->done_entry, &q->done_list); 1030 vb->state = state; 1031 } 1032 atomic_dec(&q->owned_by_drv_count); 1033 1034 if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) { 1035 media_request_object_unbind(&vb->req_obj); 1036 media_request_object_put(&vb->req_obj); 1037 } 1038 1039 spin_unlock_irqrestore(&q->done_lock, flags); 1040 1041 trace_vb2_buf_done(q, vb); 1042 1043 switch (state) { 1044 case VB2_BUF_STATE_QUEUED: 1045 return; 1046 default: 1047 /* Inform any processes that may be waiting for buffers */ 1048 wake_up(&q->done_wq); 1049 break; 1050 } 1051 } 1052 EXPORT_SYMBOL_GPL(vb2_buffer_done); 1053 1054 void vb2_discard_done(struct vb2_queue *q) 1055 { 1056 struct vb2_buffer *vb; 1057 unsigned long flags; 1058 1059 spin_lock_irqsave(&q->done_lock, flags); 1060 list_for_each_entry(vb, &q->done_list, done_entry) 1061 vb->state = VB2_BUF_STATE_ERROR; 1062 spin_unlock_irqrestore(&q->done_lock, flags); 1063 } 1064 EXPORT_SYMBOL_GPL(vb2_discard_done); 1065 1066 /* 1067 * __prepare_mmap() - prepare an MMAP buffer 1068 */ 1069 static int __prepare_mmap(struct vb2_buffer *vb) 1070 { 1071 int ret = 0; 1072 1073 ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1074 vb, vb->planes); 1075 return ret ? ret : call_vb_qop(vb, buf_prepare, vb); 1076 } 1077 1078 /* 1079 * __prepare_userptr() - prepare a USERPTR buffer 1080 */ 1081 static int __prepare_userptr(struct vb2_buffer *vb) 1082 { 1083 struct vb2_plane planes[VB2_MAX_PLANES]; 1084 struct vb2_queue *q = vb->vb2_queue; 1085 void *mem_priv; 1086 unsigned int plane; 1087 int ret = 0; 1088 bool reacquired = vb->planes[0].mem_priv == NULL; 1089 1090 memset(planes, 0, sizeof(planes[0]) * vb->num_planes); 1091 /* Copy relevant information provided by the userspace */ 1092 ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1093 vb, planes); 1094 if (ret) 1095 return ret; 1096 1097 for (plane = 0; plane < vb->num_planes; ++plane) { 1098 /* Skip the plane if already verified */ 1099 if (vb->planes[plane].m.userptr && 1100 vb->planes[plane].m.userptr == planes[plane].m.userptr 1101 && vb->planes[plane].length == planes[plane].length) 1102 continue; 1103 1104 dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n", 1105 plane); 1106 1107 /* Check if the provided plane buffer is large enough */ 1108 if (planes[plane].length < vb->planes[plane].min_length) { 1109 dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n", 1110 planes[plane].length, 1111 vb->planes[plane].min_length, 1112 plane); 1113 ret = -EINVAL; 1114 goto err; 1115 } 1116 1117 /* Release previously acquired memory if present */ 1118 if (vb->planes[plane].mem_priv) { 1119 if (!reacquired) { 1120 reacquired = true; 1121 vb->copied_timestamp = 0; 1122 call_void_vb_qop(vb, buf_cleanup, vb); 1123 } 1124 call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); 1125 } 1126 1127 vb->planes[plane].mem_priv = NULL; 1128 vb->planes[plane].bytesused = 0; 1129 vb->planes[plane].length = 0; 1130 vb->planes[plane].m.userptr = 0; 1131 vb->planes[plane].data_offset = 0; 1132 1133 /* Acquire each plane's memory */ 1134 mem_priv = call_ptr_memop(vb, get_userptr, 1135 q->alloc_devs[plane] ? : q->dev, 1136 planes[plane].m.userptr, 1137 planes[plane].length, q->dma_dir); 1138 if (IS_ERR(mem_priv)) { 1139 dprintk(q, 1, "failed acquiring userspace memory for plane %d\n", 1140 plane); 1141 ret = PTR_ERR(mem_priv); 1142 goto err; 1143 } 1144 vb->planes[plane].mem_priv = mem_priv; 1145 } 1146 1147 /* 1148 * Now that everything is in order, copy relevant information 1149 * provided by userspace. 1150 */ 1151 for (plane = 0; plane < vb->num_planes; ++plane) { 1152 vb->planes[plane].bytesused = planes[plane].bytesused; 1153 vb->planes[plane].length = planes[plane].length; 1154 vb->planes[plane].m.userptr = planes[plane].m.userptr; 1155 vb->planes[plane].data_offset = planes[plane].data_offset; 1156 } 1157 1158 if (reacquired) { 1159 /* 1160 * One or more planes changed, so we must call buf_init to do 1161 * the driver-specific initialization on the newly acquired 1162 * buffer, if provided. 1163 */ 1164 ret = call_vb_qop(vb, buf_init, vb); 1165 if (ret) { 1166 dprintk(q, 1, "buffer initialization failed\n"); 1167 goto err; 1168 } 1169 } 1170 1171 ret = call_vb_qop(vb, buf_prepare, vb); 1172 if (ret) { 1173 dprintk(q, 1, "buffer preparation failed\n"); 1174 call_void_vb_qop(vb, buf_cleanup, vb); 1175 goto err; 1176 } 1177 1178 return 0; 1179 err: 1180 /* In case of errors, release planes that were already acquired */ 1181 for (plane = 0; plane < vb->num_planes; ++plane) { 1182 if (vb->planes[plane].mem_priv) 1183 call_void_memop(vb, put_userptr, 1184 vb->planes[plane].mem_priv); 1185 vb->planes[plane].mem_priv = NULL; 1186 vb->planes[plane].m.userptr = 0; 1187 vb->planes[plane].length = 0; 1188 } 1189 1190 return ret; 1191 } 1192 1193 /* 1194 * __prepare_dmabuf() - prepare a DMABUF buffer 1195 */ 1196 static int __prepare_dmabuf(struct vb2_buffer *vb) 1197 { 1198 struct vb2_plane planes[VB2_MAX_PLANES]; 1199 struct vb2_queue *q = vb->vb2_queue; 1200 void *mem_priv; 1201 unsigned int plane; 1202 int ret = 0; 1203 bool reacquired = vb->planes[0].mem_priv == NULL; 1204 1205 memset(planes, 0, sizeof(planes[0]) * vb->num_planes); 1206 /* Copy relevant information provided by the userspace */ 1207 ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1208 vb, planes); 1209 if (ret) 1210 return ret; 1211 1212 for (plane = 0; plane < vb->num_planes; ++plane) { 1213 struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); 1214 1215 if (IS_ERR_OR_NULL(dbuf)) { 1216 dprintk(q, 1, "invalid dmabuf fd for plane %d\n", 1217 plane); 1218 ret = -EINVAL; 1219 goto err; 1220 } 1221 1222 /* use DMABUF size if length is not provided */ 1223 if (planes[plane].length == 0) 1224 planes[plane].length = dbuf->size; 1225 1226 if (planes[plane].length < vb->planes[plane].min_length) { 1227 dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n", 1228 planes[plane].length, plane, 1229 vb->planes[plane].min_length); 1230 dma_buf_put(dbuf); 1231 ret = -EINVAL; 1232 goto err; 1233 } 1234 1235 /* Skip the plane if already verified */ 1236 if (dbuf == vb->planes[plane].dbuf && 1237 vb->planes[plane].length == planes[plane].length) { 1238 dma_buf_put(dbuf); 1239 continue; 1240 } 1241 1242 dprintk(q, 3, "buffer for plane %d changed\n", plane); 1243 1244 if (!reacquired) { 1245 reacquired = true; 1246 vb->copied_timestamp = 0; 1247 call_void_vb_qop(vb, buf_cleanup, vb); 1248 } 1249 1250 /* Release previously acquired memory if present */ 1251 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); 1252 vb->planes[plane].bytesused = 0; 1253 vb->planes[plane].length = 0; 1254 vb->planes[plane].m.fd = 0; 1255 vb->planes[plane].data_offset = 0; 1256 1257 /* Acquire each plane's memory */ 1258 mem_priv = call_ptr_memop(vb, attach_dmabuf, 1259 q->alloc_devs[plane] ? : q->dev, 1260 dbuf, planes[plane].length, q->dma_dir); 1261 if (IS_ERR(mem_priv)) { 1262 dprintk(q, 1, "failed to attach dmabuf\n"); 1263 ret = PTR_ERR(mem_priv); 1264 dma_buf_put(dbuf); 1265 goto err; 1266 } 1267 1268 vb->planes[plane].dbuf = dbuf; 1269 vb->planes[plane].mem_priv = mem_priv; 1270 } 1271 1272 /* 1273 * This pins the buffer(s) with dma_buf_map_attachment()). It's done 1274 * here instead just before the DMA, while queueing the buffer(s) so 1275 * userspace knows sooner rather than later if the dma-buf map fails. 1276 */ 1277 for (plane = 0; plane < vb->num_planes; ++plane) { 1278 if (vb->planes[plane].dbuf_mapped) 1279 continue; 1280 1281 ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv); 1282 if (ret) { 1283 dprintk(q, 1, "failed to map dmabuf for plane %d\n", 1284 plane); 1285 goto err; 1286 } 1287 vb->planes[plane].dbuf_mapped = 1; 1288 } 1289 1290 /* 1291 * Now that everything is in order, copy relevant information 1292 * provided by userspace. 1293 */ 1294 for (plane = 0; plane < vb->num_planes; ++plane) { 1295 vb->planes[plane].bytesused = planes[plane].bytesused; 1296 vb->planes[plane].length = planes[plane].length; 1297 vb->planes[plane].m.fd = planes[plane].m.fd; 1298 vb->planes[plane].data_offset = planes[plane].data_offset; 1299 } 1300 1301 if (reacquired) { 1302 /* 1303 * Call driver-specific initialization on the newly acquired buffer, 1304 * if provided. 1305 */ 1306 ret = call_vb_qop(vb, buf_init, vb); 1307 if (ret) { 1308 dprintk(q, 1, "buffer initialization failed\n"); 1309 goto err; 1310 } 1311 } 1312 1313 ret = call_vb_qop(vb, buf_prepare, vb); 1314 if (ret) { 1315 dprintk(q, 1, "buffer preparation failed\n"); 1316 call_void_vb_qop(vb, buf_cleanup, vb); 1317 goto err; 1318 } 1319 1320 return 0; 1321 err: 1322 /* In case of errors, release planes that were already acquired */ 1323 __vb2_buf_dmabuf_put(vb); 1324 1325 return ret; 1326 } 1327 1328 /* 1329 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing 1330 */ 1331 static void __enqueue_in_driver(struct vb2_buffer *vb) 1332 { 1333 struct vb2_queue *q = vb->vb2_queue; 1334 1335 vb->state = VB2_BUF_STATE_ACTIVE; 1336 atomic_inc(&q->owned_by_drv_count); 1337 1338 trace_vb2_buf_queue(q, vb); 1339 1340 call_void_vb_qop(vb, buf_queue, vb); 1341 } 1342 1343 static int __buf_prepare(struct vb2_buffer *vb) 1344 { 1345 struct vb2_queue *q = vb->vb2_queue; 1346 enum vb2_buffer_state orig_state = vb->state; 1347 int ret; 1348 1349 if (q->error) { 1350 dprintk(q, 1, "fatal error occurred on queue\n"); 1351 return -EIO; 1352 } 1353 1354 if (vb->prepared) 1355 return 0; 1356 WARN_ON(vb->synced); 1357 1358 if (q->is_output) { 1359 ret = call_vb_qop(vb, buf_out_validate, vb); 1360 if (ret) { 1361 dprintk(q, 1, "buffer validation failed\n"); 1362 return ret; 1363 } 1364 } 1365 1366 vb->state = VB2_BUF_STATE_PREPARING; 1367 1368 switch (q->memory) { 1369 case VB2_MEMORY_MMAP: 1370 ret = __prepare_mmap(vb); 1371 break; 1372 case VB2_MEMORY_USERPTR: 1373 ret = __prepare_userptr(vb); 1374 break; 1375 case VB2_MEMORY_DMABUF: 1376 ret = __prepare_dmabuf(vb); 1377 break; 1378 default: 1379 WARN(1, "Invalid queue type\n"); 1380 ret = -EINVAL; 1381 break; 1382 } 1383 1384 if (ret) { 1385 dprintk(q, 1, "buffer preparation failed: %d\n", ret); 1386 vb->state = orig_state; 1387 return ret; 1388 } 1389 1390 __vb2_buf_mem_prepare(vb); 1391 vb->prepared = 1; 1392 vb->state = orig_state; 1393 1394 return 0; 1395 } 1396 1397 static int vb2_req_prepare(struct media_request_object *obj) 1398 { 1399 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1400 int ret; 1401 1402 if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST)) 1403 return -EINVAL; 1404 1405 mutex_lock(vb->vb2_queue->lock); 1406 ret = __buf_prepare(vb); 1407 mutex_unlock(vb->vb2_queue->lock); 1408 return ret; 1409 } 1410 1411 static void __vb2_dqbuf(struct vb2_buffer *vb); 1412 1413 static void vb2_req_unprepare(struct media_request_object *obj) 1414 { 1415 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1416 1417 mutex_lock(vb->vb2_queue->lock); 1418 __vb2_dqbuf(vb); 1419 vb->state = VB2_BUF_STATE_IN_REQUEST; 1420 mutex_unlock(vb->vb2_queue->lock); 1421 WARN_ON(!vb->req_obj.req); 1422 } 1423 1424 int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, 1425 struct media_request *req); 1426 1427 static void vb2_req_queue(struct media_request_object *obj) 1428 { 1429 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1430 1431 mutex_lock(vb->vb2_queue->lock); 1432 vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL); 1433 mutex_unlock(vb->vb2_queue->lock); 1434 } 1435 1436 static void vb2_req_unbind(struct media_request_object *obj) 1437 { 1438 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1439 1440 if (vb->state == VB2_BUF_STATE_IN_REQUEST) 1441 call_void_bufop(vb->vb2_queue, init_buffer, vb); 1442 } 1443 1444 static void vb2_req_release(struct media_request_object *obj) 1445 { 1446 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1447 1448 if (vb->state == VB2_BUF_STATE_IN_REQUEST) { 1449 vb->state = VB2_BUF_STATE_DEQUEUED; 1450 if (vb->request) 1451 media_request_put(vb->request); 1452 vb->request = NULL; 1453 } 1454 } 1455 1456 static const struct media_request_object_ops vb2_core_req_ops = { 1457 .prepare = vb2_req_prepare, 1458 .unprepare = vb2_req_unprepare, 1459 .queue = vb2_req_queue, 1460 .unbind = vb2_req_unbind, 1461 .release = vb2_req_release, 1462 }; 1463 1464 bool vb2_request_object_is_buffer(struct media_request_object *obj) 1465 { 1466 return obj->ops == &vb2_core_req_ops; 1467 } 1468 EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer); 1469 1470 unsigned int vb2_request_buffer_cnt(struct media_request *req) 1471 { 1472 struct media_request_object *obj; 1473 unsigned long flags; 1474 unsigned int buffer_cnt = 0; 1475 1476 spin_lock_irqsave(&req->lock, flags); 1477 list_for_each_entry(obj, &req->objects, list) 1478 if (vb2_request_object_is_buffer(obj)) 1479 buffer_cnt++; 1480 spin_unlock_irqrestore(&req->lock, flags); 1481 1482 return buffer_cnt; 1483 } 1484 EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt); 1485 1486 int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb) 1487 { 1488 struct vb2_buffer *vb; 1489 int ret; 1490 1491 vb = q->bufs[index]; 1492 if (vb->state != VB2_BUF_STATE_DEQUEUED) { 1493 dprintk(q, 1, "invalid buffer state %d\n", 1494 vb->state); 1495 return -EINVAL; 1496 } 1497 if (vb->prepared) { 1498 dprintk(q, 1, "buffer already prepared\n"); 1499 return -EINVAL; 1500 } 1501 1502 ret = __buf_prepare(vb); 1503 if (ret) 1504 return ret; 1505 1506 /* Fill buffer information for the userspace */ 1507 call_void_bufop(q, fill_user_buffer, vb, pb); 1508 1509 dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index); 1510 1511 return 0; 1512 } 1513 EXPORT_SYMBOL_GPL(vb2_core_prepare_buf); 1514 1515 /* 1516 * vb2_start_streaming() - Attempt to start streaming. 1517 * @q: videobuf2 queue 1518 * 1519 * Attempt to start streaming. When this function is called there must be 1520 * at least q->min_buffers_needed buffers queued up (i.e. the minimum 1521 * number of buffers required for the DMA engine to function). If the 1522 * @start_streaming op fails it is supposed to return all the driver-owned 1523 * buffers back to vb2 in state QUEUED. Check if that happened and if 1524 * not warn and reclaim them forcefully. 1525 */ 1526 static int vb2_start_streaming(struct vb2_queue *q) 1527 { 1528 struct vb2_buffer *vb; 1529 int ret; 1530 1531 /* 1532 * If any buffers were queued before streamon, 1533 * we can now pass them to driver for processing. 1534 */ 1535 list_for_each_entry(vb, &q->queued_list, queued_entry) 1536 __enqueue_in_driver(vb); 1537 1538 /* Tell the driver to start streaming */ 1539 q->start_streaming_called = 1; 1540 ret = call_qop(q, start_streaming, q, 1541 atomic_read(&q->owned_by_drv_count)); 1542 if (!ret) 1543 return 0; 1544 1545 q->start_streaming_called = 0; 1546 1547 dprintk(q, 1, "driver refused to start streaming\n"); 1548 /* 1549 * If you see this warning, then the driver isn't cleaning up properly 1550 * after a failed start_streaming(). See the start_streaming() 1551 * documentation in videobuf2-core.h for more information how buffers 1552 * should be returned to vb2 in start_streaming(). 1553 */ 1554 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { 1555 unsigned i; 1556 1557 /* 1558 * Forcefully reclaim buffers if the driver did not 1559 * correctly return them to vb2. 1560 */ 1561 for (i = 0; i < q->num_buffers; ++i) { 1562 vb = q->bufs[i]; 1563 if (vb->state == VB2_BUF_STATE_ACTIVE) 1564 vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED); 1565 } 1566 /* Must be zero now */ 1567 WARN_ON(atomic_read(&q->owned_by_drv_count)); 1568 } 1569 /* 1570 * If done_list is not empty, then start_streaming() didn't call 1571 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or 1572 * STATE_DONE. 1573 */ 1574 WARN_ON(!list_empty(&q->done_list)); 1575 return ret; 1576 } 1577 1578 int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, 1579 struct media_request *req) 1580 { 1581 struct vb2_buffer *vb; 1582 int ret; 1583 1584 if (q->error) { 1585 dprintk(q, 1, "fatal error occurred on queue\n"); 1586 return -EIO; 1587 } 1588 1589 vb = q->bufs[index]; 1590 1591 if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && 1592 q->requires_requests) { 1593 dprintk(q, 1, "qbuf requires a request\n"); 1594 return -EBADR; 1595 } 1596 1597 if ((req && q->uses_qbuf) || 1598 (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && 1599 q->uses_requests)) { 1600 dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n"); 1601 return -EBUSY; 1602 } 1603 1604 if (req) { 1605 int ret; 1606 1607 q->uses_requests = 1; 1608 if (vb->state != VB2_BUF_STATE_DEQUEUED) { 1609 dprintk(q, 1, "buffer %d not in dequeued state\n", 1610 vb->index); 1611 return -EINVAL; 1612 } 1613 1614 if (q->is_output && !vb->prepared) { 1615 ret = call_vb_qop(vb, buf_out_validate, vb); 1616 if (ret) { 1617 dprintk(q, 1, "buffer validation failed\n"); 1618 return ret; 1619 } 1620 } 1621 1622 media_request_object_init(&vb->req_obj); 1623 1624 /* Make sure the request is in a safe state for updating. */ 1625 ret = media_request_lock_for_update(req); 1626 if (ret) 1627 return ret; 1628 ret = media_request_object_bind(req, &vb2_core_req_ops, 1629 q, true, &vb->req_obj); 1630 media_request_unlock_for_update(req); 1631 if (ret) 1632 return ret; 1633 1634 vb->state = VB2_BUF_STATE_IN_REQUEST; 1635 1636 /* 1637 * Increment the refcount and store the request. 1638 * The request refcount is decremented again when the 1639 * buffer is dequeued. This is to prevent vb2_buffer_done() 1640 * from freeing the request from interrupt context, which can 1641 * happen if the application closed the request fd after 1642 * queueing the request. 1643 */ 1644 media_request_get(req); 1645 vb->request = req; 1646 1647 /* Fill buffer information for the userspace */ 1648 if (pb) { 1649 call_void_bufop(q, copy_timestamp, vb, pb); 1650 call_void_bufop(q, fill_user_buffer, vb, pb); 1651 } 1652 1653 dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); 1654 return 0; 1655 } 1656 1657 if (vb->state != VB2_BUF_STATE_IN_REQUEST) 1658 q->uses_qbuf = 1; 1659 1660 switch (vb->state) { 1661 case VB2_BUF_STATE_DEQUEUED: 1662 case VB2_BUF_STATE_IN_REQUEST: 1663 if (!vb->prepared) { 1664 ret = __buf_prepare(vb); 1665 if (ret) 1666 return ret; 1667 } 1668 break; 1669 case VB2_BUF_STATE_PREPARING: 1670 dprintk(q, 1, "buffer still being prepared\n"); 1671 return -EINVAL; 1672 default: 1673 dprintk(q, 1, "invalid buffer state %d\n", vb->state); 1674 return -EINVAL; 1675 } 1676 1677 /* 1678 * Add to the queued buffers list, a buffer will stay on it until 1679 * dequeued in dqbuf. 1680 */ 1681 list_add_tail(&vb->queued_entry, &q->queued_list); 1682 q->queued_count++; 1683 q->waiting_for_buffers = false; 1684 vb->state = VB2_BUF_STATE_QUEUED; 1685 1686 if (pb) 1687 call_void_bufop(q, copy_timestamp, vb, pb); 1688 1689 trace_vb2_qbuf(q, vb); 1690 1691 /* 1692 * If already streaming, give the buffer to driver for processing. 1693 * If not, the buffer will be given to driver on next streamon. 1694 */ 1695 if (q->start_streaming_called) 1696 __enqueue_in_driver(vb); 1697 1698 /* Fill buffer information for the userspace */ 1699 if (pb) 1700 call_void_bufop(q, fill_user_buffer, vb, pb); 1701 1702 /* 1703 * If streamon has been called, and we haven't yet called 1704 * start_streaming() since not enough buffers were queued, and 1705 * we now have reached the minimum number of queued buffers, 1706 * then we can finally call start_streaming(). 1707 */ 1708 if (q->streaming && !q->start_streaming_called && 1709 q->queued_count >= q->min_buffers_needed) { 1710 ret = vb2_start_streaming(q); 1711 if (ret) 1712 return ret; 1713 } 1714 1715 dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); 1716 return 0; 1717 } 1718 EXPORT_SYMBOL_GPL(vb2_core_qbuf); 1719 1720 /* 1721 * __vb2_wait_for_done_vb() - wait for a buffer to become available 1722 * for dequeuing 1723 * 1724 * Will sleep if required for nonblocking == false. 1725 */ 1726 static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) 1727 { 1728 /* 1729 * All operations on vb_done_list are performed under done_lock 1730 * spinlock protection. However, buffers may be removed from 1731 * it and returned to userspace only while holding both driver's 1732 * lock and the done_lock spinlock. Thus we can be sure that as 1733 * long as we hold the driver's lock, the list will remain not 1734 * empty if list_empty() check succeeds. 1735 */ 1736 1737 for (;;) { 1738 int ret; 1739 1740 if (q->waiting_in_dqbuf) { 1741 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); 1742 return -EBUSY; 1743 } 1744 1745 if (!q->streaming) { 1746 dprintk(q, 1, "streaming off, will not wait for buffers\n"); 1747 return -EINVAL; 1748 } 1749 1750 if (q->error) { 1751 dprintk(q, 1, "Queue in error state, will not wait for buffers\n"); 1752 return -EIO; 1753 } 1754 1755 if (q->last_buffer_dequeued) { 1756 dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n"); 1757 return -EPIPE; 1758 } 1759 1760 if (!list_empty(&q->done_list)) { 1761 /* 1762 * Found a buffer that we were waiting for. 1763 */ 1764 break; 1765 } 1766 1767 if (nonblocking) { 1768 dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n"); 1769 return -EAGAIN; 1770 } 1771 1772 q->waiting_in_dqbuf = 1; 1773 /* 1774 * We are streaming and blocking, wait for another buffer to 1775 * become ready or for streamoff. Driver's lock is released to 1776 * allow streamoff or qbuf to be called while waiting. 1777 */ 1778 call_void_qop(q, wait_prepare, q); 1779 1780 /* 1781 * All locks have been released, it is safe to sleep now. 1782 */ 1783 dprintk(q, 3, "will sleep waiting for buffers\n"); 1784 ret = wait_event_interruptible(q->done_wq, 1785 !list_empty(&q->done_list) || !q->streaming || 1786 q->error); 1787 1788 /* 1789 * We need to reevaluate both conditions again after reacquiring 1790 * the locks or return an error if one occurred. 1791 */ 1792 call_void_qop(q, wait_finish, q); 1793 q->waiting_in_dqbuf = 0; 1794 if (ret) { 1795 dprintk(q, 1, "sleep was interrupted\n"); 1796 return ret; 1797 } 1798 } 1799 return 0; 1800 } 1801 1802 /* 1803 * __vb2_get_done_vb() - get a buffer ready for dequeuing 1804 * 1805 * Will sleep if required for nonblocking == false. 1806 */ 1807 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, 1808 void *pb, int nonblocking) 1809 { 1810 unsigned long flags; 1811 int ret = 0; 1812 1813 /* 1814 * Wait for at least one buffer to become available on the done_list. 1815 */ 1816 ret = __vb2_wait_for_done_vb(q, nonblocking); 1817 if (ret) 1818 return ret; 1819 1820 /* 1821 * Driver's lock has been held since we last verified that done_list 1822 * is not empty, so no need for another list_empty(done_list) check. 1823 */ 1824 spin_lock_irqsave(&q->done_lock, flags); 1825 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); 1826 /* 1827 * Only remove the buffer from done_list if all planes can be 1828 * handled. Some cases such as V4L2 file I/O and DVB have pb 1829 * == NULL; skip the check then as there's nothing to verify. 1830 */ 1831 if (pb) 1832 ret = call_bufop(q, verify_planes_array, *vb, pb); 1833 if (!ret) 1834 list_del(&(*vb)->done_entry); 1835 spin_unlock_irqrestore(&q->done_lock, flags); 1836 1837 return ret; 1838 } 1839 1840 int vb2_wait_for_all_buffers(struct vb2_queue *q) 1841 { 1842 if (!q->streaming) { 1843 dprintk(q, 1, "streaming off, will not wait for buffers\n"); 1844 return -EINVAL; 1845 } 1846 1847 if (q->start_streaming_called) 1848 wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count)); 1849 return 0; 1850 } 1851 EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); 1852 1853 /* 1854 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state 1855 */ 1856 static void __vb2_dqbuf(struct vb2_buffer *vb) 1857 { 1858 struct vb2_queue *q = vb->vb2_queue; 1859 1860 /* nothing to do if the buffer is already dequeued */ 1861 if (vb->state == VB2_BUF_STATE_DEQUEUED) 1862 return; 1863 1864 vb->state = VB2_BUF_STATE_DEQUEUED; 1865 1866 call_void_bufop(q, init_buffer, vb); 1867 } 1868 1869 int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, 1870 bool nonblocking) 1871 { 1872 struct vb2_buffer *vb = NULL; 1873 int ret; 1874 1875 ret = __vb2_get_done_vb(q, &vb, pb, nonblocking); 1876 if (ret < 0) 1877 return ret; 1878 1879 switch (vb->state) { 1880 case VB2_BUF_STATE_DONE: 1881 dprintk(q, 3, "returning done buffer\n"); 1882 break; 1883 case VB2_BUF_STATE_ERROR: 1884 dprintk(q, 3, "returning done buffer with errors\n"); 1885 break; 1886 default: 1887 dprintk(q, 1, "invalid buffer state\n"); 1888 return -EINVAL; 1889 } 1890 1891 call_void_vb_qop(vb, buf_finish, vb); 1892 vb->prepared = 0; 1893 1894 if (pindex) 1895 *pindex = vb->index; 1896 1897 /* Fill buffer information for the userspace */ 1898 if (pb) 1899 call_void_bufop(q, fill_user_buffer, vb, pb); 1900 1901 /* Remove from videobuf queue */ 1902 list_del(&vb->queued_entry); 1903 q->queued_count--; 1904 1905 trace_vb2_dqbuf(q, vb); 1906 1907 /* go back to dequeued state */ 1908 __vb2_dqbuf(vb); 1909 1910 if (WARN_ON(vb->req_obj.req)) { 1911 media_request_object_unbind(&vb->req_obj); 1912 media_request_object_put(&vb->req_obj); 1913 } 1914 if (vb->request) 1915 media_request_put(vb->request); 1916 vb->request = NULL; 1917 1918 dprintk(q, 2, "dqbuf of buffer %d, with state %d\n", 1919 vb->index, vb->state); 1920 1921 return 0; 1922 1923 } 1924 EXPORT_SYMBOL_GPL(vb2_core_dqbuf); 1925 1926 /* 1927 * __vb2_queue_cancel() - cancel and stop (pause) streaming 1928 * 1929 * Removes all queued buffers from driver's queue and all buffers queued by 1930 * userspace from videobuf's queue. Returns to state after reqbufs. 1931 */ 1932 static void __vb2_queue_cancel(struct vb2_queue *q) 1933 { 1934 unsigned int i; 1935 1936 /* 1937 * Tell driver to stop all transactions and release all queued 1938 * buffers. 1939 */ 1940 if (q->start_streaming_called) 1941 call_void_qop(q, stop_streaming, q); 1942 1943 /* 1944 * If you see this warning, then the driver isn't cleaning up properly 1945 * in stop_streaming(). See the stop_streaming() documentation in 1946 * videobuf2-core.h for more information how buffers should be returned 1947 * to vb2 in stop_streaming(). 1948 */ 1949 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { 1950 for (i = 0; i < q->num_buffers; ++i) 1951 if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) { 1952 pr_warn("driver bug: stop_streaming operation is leaving buf %p in active state\n", 1953 q->bufs[i]); 1954 vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR); 1955 } 1956 /* Must be zero now */ 1957 WARN_ON(atomic_read(&q->owned_by_drv_count)); 1958 } 1959 1960 q->streaming = 0; 1961 q->start_streaming_called = 0; 1962 q->queued_count = 0; 1963 q->error = 0; 1964 q->uses_requests = 0; 1965 q->uses_qbuf = 0; 1966 1967 /* 1968 * Remove all buffers from videobuf's list... 1969 */ 1970 INIT_LIST_HEAD(&q->queued_list); 1971 /* 1972 * ...and done list; userspace will not receive any buffers it 1973 * has not already dequeued before initiating cancel. 1974 */ 1975 INIT_LIST_HEAD(&q->done_list); 1976 atomic_set(&q->owned_by_drv_count, 0); 1977 wake_up_all(&q->done_wq); 1978 1979 /* 1980 * Reinitialize all buffers for next use. 1981 * Make sure to call buf_finish for any queued buffers. Normally 1982 * that's done in dqbuf, but that's not going to happen when we 1983 * cancel the whole queue. Note: this code belongs here, not in 1984 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical 1985 * call to __fill_user_buffer() after buf_finish(). That order can't 1986 * be changed, so we can't move the buf_finish() to __vb2_dqbuf(). 1987 */ 1988 for (i = 0; i < q->num_buffers; ++i) { 1989 struct vb2_buffer *vb = q->bufs[i]; 1990 struct media_request *req = vb->req_obj.req; 1991 1992 /* 1993 * If a request is associated with this buffer, then 1994 * call buf_request_cancel() to give the driver to complete() 1995 * related request objects. Otherwise those objects would 1996 * never complete. 1997 */ 1998 if (req) { 1999 enum media_request_state state; 2000 unsigned long flags; 2001 2002 spin_lock_irqsave(&req->lock, flags); 2003 state = req->state; 2004 spin_unlock_irqrestore(&req->lock, flags); 2005 2006 if (state == MEDIA_REQUEST_STATE_QUEUED) 2007 call_void_vb_qop(vb, buf_request_complete, vb); 2008 } 2009 2010 __vb2_buf_mem_finish(vb); 2011 2012 if (vb->prepared) { 2013 call_void_vb_qop(vb, buf_finish, vb); 2014 vb->prepared = 0; 2015 } 2016 __vb2_dqbuf(vb); 2017 2018 if (vb->req_obj.req) { 2019 media_request_object_unbind(&vb->req_obj); 2020 media_request_object_put(&vb->req_obj); 2021 } 2022 if (vb->request) 2023 media_request_put(vb->request); 2024 vb->request = NULL; 2025 vb->copied_timestamp = 0; 2026 } 2027 } 2028 2029 int vb2_core_streamon(struct vb2_queue *q, unsigned int type) 2030 { 2031 int ret; 2032 2033 if (type != q->type) { 2034 dprintk(q, 1, "invalid stream type\n"); 2035 return -EINVAL; 2036 } 2037 2038 if (q->streaming) { 2039 dprintk(q, 3, "already streaming\n"); 2040 return 0; 2041 } 2042 2043 if (!q->num_buffers) { 2044 dprintk(q, 1, "no buffers have been allocated\n"); 2045 return -EINVAL; 2046 } 2047 2048 if (q->num_buffers < q->min_buffers_needed) { 2049 dprintk(q, 1, "need at least %u allocated buffers\n", 2050 q->min_buffers_needed); 2051 return -EINVAL; 2052 } 2053 2054 /* 2055 * Tell driver to start streaming provided sufficient buffers 2056 * are available. 2057 */ 2058 if (q->queued_count >= q->min_buffers_needed) { 2059 ret = v4l_vb2q_enable_media_source(q); 2060 if (ret) 2061 return ret; 2062 ret = vb2_start_streaming(q); 2063 if (ret) 2064 return ret; 2065 } 2066 2067 q->streaming = 1; 2068 2069 dprintk(q, 3, "successful\n"); 2070 return 0; 2071 } 2072 EXPORT_SYMBOL_GPL(vb2_core_streamon); 2073 2074 void vb2_queue_error(struct vb2_queue *q) 2075 { 2076 q->error = 1; 2077 2078 wake_up_all(&q->done_wq); 2079 } 2080 EXPORT_SYMBOL_GPL(vb2_queue_error); 2081 2082 int vb2_core_streamoff(struct vb2_queue *q, unsigned int type) 2083 { 2084 if (type != q->type) { 2085 dprintk(q, 1, "invalid stream type\n"); 2086 return -EINVAL; 2087 } 2088 2089 /* 2090 * Cancel will pause streaming and remove all buffers from the driver 2091 * and videobuf, effectively returning control over them to userspace. 2092 * 2093 * Note that we do this even if q->streaming == 0: if you prepare or 2094 * queue buffers, and then call streamoff without ever having called 2095 * streamon, you would still expect those buffers to be returned to 2096 * their normal dequeued state. 2097 */ 2098 __vb2_queue_cancel(q); 2099 q->waiting_for_buffers = !q->is_output; 2100 q->last_buffer_dequeued = false; 2101 2102 dprintk(q, 3, "successful\n"); 2103 return 0; 2104 } 2105 EXPORT_SYMBOL_GPL(vb2_core_streamoff); 2106 2107 /* 2108 * __find_plane_by_offset() - find plane associated with the given offset off 2109 */ 2110 static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off, 2111 unsigned int *_buffer, unsigned int *_plane) 2112 { 2113 struct vb2_buffer *vb; 2114 unsigned int buffer, plane; 2115 2116 /* 2117 * Go over all buffers and their planes, comparing the given offset 2118 * with an offset assigned to each plane. If a match is found, 2119 * return its buffer and plane numbers. 2120 */ 2121 for (buffer = 0; buffer < q->num_buffers; ++buffer) { 2122 vb = q->bufs[buffer]; 2123 2124 for (plane = 0; plane < vb->num_planes; ++plane) { 2125 if (vb->planes[plane].m.offset == off) { 2126 *_buffer = buffer; 2127 *_plane = plane; 2128 return 0; 2129 } 2130 } 2131 } 2132 2133 return -EINVAL; 2134 } 2135 2136 int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, 2137 unsigned int index, unsigned int plane, unsigned int flags) 2138 { 2139 struct vb2_buffer *vb = NULL; 2140 struct vb2_plane *vb_plane; 2141 int ret; 2142 struct dma_buf *dbuf; 2143 2144 if (q->memory != VB2_MEMORY_MMAP) { 2145 dprintk(q, 1, "queue is not currently set up for mmap\n"); 2146 return -EINVAL; 2147 } 2148 2149 if (!q->mem_ops->get_dmabuf) { 2150 dprintk(q, 1, "queue does not support DMA buffer exporting\n"); 2151 return -EINVAL; 2152 } 2153 2154 if (flags & ~(O_CLOEXEC | O_ACCMODE)) { 2155 dprintk(q, 1, "queue does support only O_CLOEXEC and access mode flags\n"); 2156 return -EINVAL; 2157 } 2158 2159 if (type != q->type) { 2160 dprintk(q, 1, "invalid buffer type\n"); 2161 return -EINVAL; 2162 } 2163 2164 if (index >= q->num_buffers) { 2165 dprintk(q, 1, "buffer index out of range\n"); 2166 return -EINVAL; 2167 } 2168 2169 vb = q->bufs[index]; 2170 2171 if (plane >= vb->num_planes) { 2172 dprintk(q, 1, "buffer plane out of range\n"); 2173 return -EINVAL; 2174 } 2175 2176 if (vb2_fileio_is_active(q)) { 2177 dprintk(q, 1, "expbuf: file io in progress\n"); 2178 return -EBUSY; 2179 } 2180 2181 vb_plane = &vb->planes[plane]; 2182 2183 dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, 2184 flags & O_ACCMODE); 2185 if (IS_ERR_OR_NULL(dbuf)) { 2186 dprintk(q, 1, "failed to export buffer %d, plane %d\n", 2187 index, plane); 2188 return -EINVAL; 2189 } 2190 2191 ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE); 2192 if (ret < 0) { 2193 dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n", 2194 index, plane, ret); 2195 dma_buf_put(dbuf); 2196 return ret; 2197 } 2198 2199 dprintk(q, 3, "buffer %d, plane %d exported as %d descriptor\n", 2200 index, plane, ret); 2201 *fd = ret; 2202 2203 return 0; 2204 } 2205 EXPORT_SYMBOL_GPL(vb2_core_expbuf); 2206 2207 int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) 2208 { 2209 unsigned long off = vma->vm_pgoff << PAGE_SHIFT; 2210 struct vb2_buffer *vb; 2211 unsigned int buffer = 0, plane = 0; 2212 int ret; 2213 unsigned long length; 2214 2215 if (q->memory != VB2_MEMORY_MMAP) { 2216 dprintk(q, 1, "queue is not currently set up for mmap\n"); 2217 return -EINVAL; 2218 } 2219 2220 /* 2221 * Check memory area access mode. 2222 */ 2223 if (!(vma->vm_flags & VM_SHARED)) { 2224 dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n"); 2225 return -EINVAL; 2226 } 2227 if (q->is_output) { 2228 if (!(vma->vm_flags & VM_WRITE)) { 2229 dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n"); 2230 return -EINVAL; 2231 } 2232 } else { 2233 if (!(vma->vm_flags & VM_READ)) { 2234 dprintk(q, 1, "invalid vma flags, VM_READ needed\n"); 2235 return -EINVAL; 2236 } 2237 } 2238 2239 mutex_lock(&q->mmap_lock); 2240 2241 if (vb2_fileio_is_active(q)) { 2242 dprintk(q, 1, "mmap: file io in progress\n"); 2243 ret = -EBUSY; 2244 goto unlock; 2245 } 2246 2247 /* 2248 * Find the plane corresponding to the offset passed by userspace. 2249 */ 2250 ret = __find_plane_by_offset(q, off, &buffer, &plane); 2251 if (ret) 2252 goto unlock; 2253 2254 vb = q->bufs[buffer]; 2255 2256 /* 2257 * MMAP requires page_aligned buffers. 2258 * The buffer length was page_aligned at __vb2_buf_mem_alloc(), 2259 * so, we need to do the same here. 2260 */ 2261 length = PAGE_ALIGN(vb->planes[plane].length); 2262 if (length < (vma->vm_end - vma->vm_start)) { 2263 dprintk(q, 1, 2264 "MMAP invalid, as it would overflow buffer length\n"); 2265 ret = -EINVAL; 2266 goto unlock; 2267 } 2268 2269 /* 2270 * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer, 2271 * not as a in-buffer offset. We always want to mmap a whole buffer 2272 * from its beginning. 2273 */ 2274 vma->vm_pgoff = 0; 2275 2276 ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma); 2277 2278 unlock: 2279 mutex_unlock(&q->mmap_lock); 2280 if (ret) 2281 return ret; 2282 2283 dprintk(q, 3, "buffer %d, plane %d successfully mapped\n", buffer, plane); 2284 return 0; 2285 } 2286 EXPORT_SYMBOL_GPL(vb2_mmap); 2287 2288 #ifndef CONFIG_MMU 2289 unsigned long vb2_get_unmapped_area(struct vb2_queue *q, 2290 unsigned long addr, 2291 unsigned long len, 2292 unsigned long pgoff, 2293 unsigned long flags) 2294 { 2295 unsigned long off = pgoff << PAGE_SHIFT; 2296 struct vb2_buffer *vb; 2297 unsigned int buffer, plane; 2298 void *vaddr; 2299 int ret; 2300 2301 if (q->memory != VB2_MEMORY_MMAP) { 2302 dprintk(q, 1, "queue is not currently set up for mmap\n"); 2303 return -EINVAL; 2304 } 2305 2306 /* 2307 * Find the plane corresponding to the offset passed by userspace. 2308 */ 2309 ret = __find_plane_by_offset(q, off, &buffer, &plane); 2310 if (ret) 2311 return ret; 2312 2313 vb = q->bufs[buffer]; 2314 2315 vaddr = vb2_plane_vaddr(vb, plane); 2316 return vaddr ? (unsigned long)vaddr : -EINVAL; 2317 } 2318 EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); 2319 #endif 2320 2321 int vb2_core_queue_init(struct vb2_queue *q) 2322 { 2323 /* 2324 * Sanity check 2325 */ 2326 if (WARN_ON(!q) || 2327 WARN_ON(!q->ops) || 2328 WARN_ON(!q->mem_ops) || 2329 WARN_ON(!q->type) || 2330 WARN_ON(!q->io_modes) || 2331 WARN_ON(!q->ops->queue_setup) || 2332 WARN_ON(!q->ops->buf_queue)) 2333 return -EINVAL; 2334 2335 if (WARN_ON(q->requires_requests && !q->supports_requests)) 2336 return -EINVAL; 2337 2338 INIT_LIST_HEAD(&q->queued_list); 2339 INIT_LIST_HEAD(&q->done_list); 2340 spin_lock_init(&q->done_lock); 2341 mutex_init(&q->mmap_lock); 2342 init_waitqueue_head(&q->done_wq); 2343 2344 q->memory = VB2_MEMORY_UNKNOWN; 2345 2346 if (q->buf_struct_size == 0) 2347 q->buf_struct_size = sizeof(struct vb2_buffer); 2348 2349 if (q->bidirectional) 2350 q->dma_dir = DMA_BIDIRECTIONAL; 2351 else 2352 q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 2353 2354 if (q->name[0] == '\0') 2355 snprintf(q->name, sizeof(q->name), "%s-%p", 2356 q->is_output ? "out" : "cap", q); 2357 2358 return 0; 2359 } 2360 EXPORT_SYMBOL_GPL(vb2_core_queue_init); 2361 2362 static int __vb2_init_fileio(struct vb2_queue *q, int read); 2363 static int __vb2_cleanup_fileio(struct vb2_queue *q); 2364 void vb2_core_queue_release(struct vb2_queue *q) 2365 { 2366 __vb2_cleanup_fileio(q); 2367 __vb2_queue_cancel(q); 2368 mutex_lock(&q->mmap_lock); 2369 __vb2_queue_free(q, q->num_buffers); 2370 mutex_unlock(&q->mmap_lock); 2371 } 2372 EXPORT_SYMBOL_GPL(vb2_core_queue_release); 2373 2374 __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, 2375 poll_table *wait) 2376 { 2377 __poll_t req_events = poll_requested_events(wait); 2378 struct vb2_buffer *vb = NULL; 2379 unsigned long flags; 2380 2381 if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM))) 2382 return 0; 2383 if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM))) 2384 return 0; 2385 2386 poll_wait(file, &q->done_wq, wait); 2387 2388 /* 2389 * Start file I/O emulator only if streaming API has not been used yet. 2390 */ 2391 if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) { 2392 if (!q->is_output && (q->io_modes & VB2_READ) && 2393 (req_events & (EPOLLIN | EPOLLRDNORM))) { 2394 if (__vb2_init_fileio(q, 1)) 2395 return EPOLLERR; 2396 } 2397 if (q->is_output && (q->io_modes & VB2_WRITE) && 2398 (req_events & (EPOLLOUT | EPOLLWRNORM))) { 2399 if (__vb2_init_fileio(q, 0)) 2400 return EPOLLERR; 2401 /* 2402 * Write to OUTPUT queue can be done immediately. 2403 */ 2404 return EPOLLOUT | EPOLLWRNORM; 2405 } 2406 } 2407 2408 /* 2409 * There is nothing to wait for if the queue isn't streaming, or if the 2410 * error flag is set. 2411 */ 2412 if (!vb2_is_streaming(q) || q->error) 2413 return EPOLLERR; 2414 2415 /* 2416 * If this quirk is set and QBUF hasn't been called yet then 2417 * return EPOLLERR as well. This only affects capture queues, output 2418 * queues will always initialize waiting_for_buffers to false. 2419 * This quirk is set by V4L2 for backwards compatibility reasons. 2420 */ 2421 if (q->quirk_poll_must_check_waiting_for_buffers && 2422 q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM))) 2423 return EPOLLERR; 2424 2425 /* 2426 * For output streams you can call write() as long as there are fewer 2427 * buffers queued than there are buffers available. 2428 */ 2429 if (q->is_output && q->fileio && q->queued_count < q->num_buffers) 2430 return EPOLLOUT | EPOLLWRNORM; 2431 2432 if (list_empty(&q->done_list)) { 2433 /* 2434 * If the last buffer was dequeued from a capture queue, 2435 * return immediately. DQBUF will return -EPIPE. 2436 */ 2437 if (q->last_buffer_dequeued) 2438 return EPOLLIN | EPOLLRDNORM; 2439 } 2440 2441 /* 2442 * Take first buffer available for dequeuing. 2443 */ 2444 spin_lock_irqsave(&q->done_lock, flags); 2445 if (!list_empty(&q->done_list)) 2446 vb = list_first_entry(&q->done_list, struct vb2_buffer, 2447 done_entry); 2448 spin_unlock_irqrestore(&q->done_lock, flags); 2449 2450 if (vb && (vb->state == VB2_BUF_STATE_DONE 2451 || vb->state == VB2_BUF_STATE_ERROR)) { 2452 return (q->is_output) ? 2453 EPOLLOUT | EPOLLWRNORM : 2454 EPOLLIN | EPOLLRDNORM; 2455 } 2456 return 0; 2457 } 2458 EXPORT_SYMBOL_GPL(vb2_core_poll); 2459 2460 /* 2461 * struct vb2_fileio_buf - buffer context used by file io emulator 2462 * 2463 * vb2 provides a compatibility layer and emulator of file io (read and 2464 * write) calls on top of streaming API. This structure is used for 2465 * tracking context related to the buffers. 2466 */ 2467 struct vb2_fileio_buf { 2468 void *vaddr; 2469 unsigned int size; 2470 unsigned int pos; 2471 unsigned int queued:1; 2472 }; 2473 2474 /* 2475 * struct vb2_fileio_data - queue context used by file io emulator 2476 * 2477 * @cur_index: the index of the buffer currently being read from or 2478 * written to. If equal to q->num_buffers then a new buffer 2479 * must be dequeued. 2480 * @initial_index: in the read() case all buffers are queued up immediately 2481 * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles 2482 * buffers. However, in the write() case no buffers are initially 2483 * queued, instead whenever a buffer is full it is queued up by 2484 * __vb2_perform_fileio(). Only once all available buffers have 2485 * been queued up will __vb2_perform_fileio() start to dequeue 2486 * buffers. This means that initially __vb2_perform_fileio() 2487 * needs to know what buffer index to use when it is queuing up 2488 * the buffers for the first time. That initial index is stored 2489 * in this field. Once it is equal to q->num_buffers all 2490 * available buffers have been queued and __vb2_perform_fileio() 2491 * should start the normal dequeue/queue cycle. 2492 * 2493 * vb2 provides a compatibility layer and emulator of file io (read and 2494 * write) calls on top of streaming API. For proper operation it required 2495 * this structure to save the driver state between each call of the read 2496 * or write function. 2497 */ 2498 struct vb2_fileio_data { 2499 unsigned int count; 2500 unsigned int type; 2501 unsigned int memory; 2502 struct vb2_fileio_buf bufs[VB2_MAX_FRAME]; 2503 unsigned int cur_index; 2504 unsigned int initial_index; 2505 unsigned int q_count; 2506 unsigned int dq_count; 2507 unsigned read_once:1; 2508 unsigned write_immediately:1; 2509 }; 2510 2511 /* 2512 * __vb2_init_fileio() - initialize file io emulator 2513 * @q: videobuf2 queue 2514 * @read: mode selector (1 means read, 0 means write) 2515 */ 2516 static int __vb2_init_fileio(struct vb2_queue *q, int read) 2517 { 2518 struct vb2_fileio_data *fileio; 2519 int i, ret; 2520 unsigned int count = 0; 2521 2522 /* 2523 * Sanity check 2524 */ 2525 if (WARN_ON((read && !(q->io_modes & VB2_READ)) || 2526 (!read && !(q->io_modes & VB2_WRITE)))) 2527 return -EINVAL; 2528 2529 /* 2530 * Check if device supports mapping buffers to kernel virtual space. 2531 */ 2532 if (!q->mem_ops->vaddr) 2533 return -EBUSY; 2534 2535 /* 2536 * Check if streaming api has not been already activated. 2537 */ 2538 if (q->streaming || q->num_buffers > 0) 2539 return -EBUSY; 2540 2541 /* 2542 * Start with count 1, driver can increase it in queue_setup() 2543 */ 2544 count = 1; 2545 2546 dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n", 2547 (read) ? "read" : "write", count, q->fileio_read_once, 2548 q->fileio_write_immediately); 2549 2550 fileio = kzalloc(sizeof(*fileio), GFP_KERNEL); 2551 if (fileio == NULL) 2552 return -ENOMEM; 2553 2554 fileio->read_once = q->fileio_read_once; 2555 fileio->write_immediately = q->fileio_write_immediately; 2556 2557 /* 2558 * Request buffers and use MMAP type to force driver 2559 * to allocate buffers by itself. 2560 */ 2561 fileio->count = count; 2562 fileio->memory = VB2_MEMORY_MMAP; 2563 fileio->type = q->type; 2564 q->fileio = fileio; 2565 ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); 2566 if (ret) 2567 goto err_kfree; 2568 2569 /* 2570 * Check if plane_count is correct 2571 * (multiplane buffers are not supported). 2572 */ 2573 if (q->bufs[0]->num_planes != 1) { 2574 ret = -EBUSY; 2575 goto err_reqbufs; 2576 } 2577 2578 /* 2579 * Get kernel address of each buffer. 2580 */ 2581 for (i = 0; i < q->num_buffers; i++) { 2582 fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0); 2583 if (fileio->bufs[i].vaddr == NULL) { 2584 ret = -EINVAL; 2585 goto err_reqbufs; 2586 } 2587 fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0); 2588 } 2589 2590 /* 2591 * Read mode requires pre queuing of all buffers. 2592 */ 2593 if (read) { 2594 /* 2595 * Queue all buffers. 2596 */ 2597 for (i = 0; i < q->num_buffers; i++) { 2598 ret = vb2_core_qbuf(q, i, NULL, NULL); 2599 if (ret) 2600 goto err_reqbufs; 2601 fileio->bufs[i].queued = 1; 2602 } 2603 /* 2604 * All buffers have been queued, so mark that by setting 2605 * initial_index to q->num_buffers 2606 */ 2607 fileio->initial_index = q->num_buffers; 2608 fileio->cur_index = q->num_buffers; 2609 } 2610 2611 /* 2612 * Start streaming. 2613 */ 2614 ret = vb2_core_streamon(q, q->type); 2615 if (ret) 2616 goto err_reqbufs; 2617 2618 return ret; 2619 2620 err_reqbufs: 2621 fileio->count = 0; 2622 vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); 2623 2624 err_kfree: 2625 q->fileio = NULL; 2626 kfree(fileio); 2627 return ret; 2628 } 2629 2630 /* 2631 * __vb2_cleanup_fileio() - free resourced used by file io emulator 2632 * @q: videobuf2 queue 2633 */ 2634 static int __vb2_cleanup_fileio(struct vb2_queue *q) 2635 { 2636 struct vb2_fileio_data *fileio = q->fileio; 2637 2638 if (fileio) { 2639 vb2_core_streamoff(q, q->type); 2640 q->fileio = NULL; 2641 fileio->count = 0; 2642 vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); 2643 kfree(fileio); 2644 dprintk(q, 3, "file io emulator closed\n"); 2645 } 2646 return 0; 2647 } 2648 2649 /* 2650 * __vb2_perform_fileio() - perform a single file io (read or write) operation 2651 * @q: videobuf2 queue 2652 * @data: pointed to target userspace buffer 2653 * @count: number of bytes to read or write 2654 * @ppos: file handle position tracking pointer 2655 * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking) 2656 * @read: access mode selector (1 means read, 0 means write) 2657 */ 2658 static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count, 2659 loff_t *ppos, int nonblock, int read) 2660 { 2661 struct vb2_fileio_data *fileio; 2662 struct vb2_fileio_buf *buf; 2663 bool is_multiplanar = q->is_multiplanar; 2664 /* 2665 * When using write() to write data to an output video node the vb2 core 2666 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody 2667 * else is able to provide this information with the write() operation. 2668 */ 2669 bool copy_timestamp = !read && q->copy_timestamp; 2670 unsigned index; 2671 int ret; 2672 2673 dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n", 2674 read ? "read" : "write", (long)*ppos, count, 2675 nonblock ? "non" : ""); 2676 2677 if (!data) 2678 return -EINVAL; 2679 2680 if (q->waiting_in_dqbuf) { 2681 dprintk(q, 3, "another dup()ped fd is %s\n", 2682 read ? "reading" : "writing"); 2683 return -EBUSY; 2684 } 2685 2686 /* 2687 * Initialize emulator on first call. 2688 */ 2689 if (!vb2_fileio_is_active(q)) { 2690 ret = __vb2_init_fileio(q, read); 2691 dprintk(q, 3, "vb2_init_fileio result: %d\n", ret); 2692 if (ret) 2693 return ret; 2694 } 2695 fileio = q->fileio; 2696 2697 /* 2698 * Check if we need to dequeue the buffer. 2699 */ 2700 index = fileio->cur_index; 2701 if (index >= q->num_buffers) { 2702 struct vb2_buffer *b; 2703 2704 /* 2705 * Call vb2_dqbuf to get buffer back. 2706 */ 2707 ret = vb2_core_dqbuf(q, &index, NULL, nonblock); 2708 dprintk(q, 5, "vb2_dqbuf result: %d\n", ret); 2709 if (ret) 2710 return ret; 2711 fileio->dq_count += 1; 2712 2713 fileio->cur_index = index; 2714 buf = &fileio->bufs[index]; 2715 b = q->bufs[index]; 2716 2717 /* 2718 * Get number of bytes filled by the driver 2719 */ 2720 buf->pos = 0; 2721 buf->queued = 0; 2722 buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0) 2723 : vb2_plane_size(q->bufs[index], 0); 2724 /* Compensate for data_offset on read in the multiplanar case. */ 2725 if (is_multiplanar && read && 2726 b->planes[0].data_offset < buf->size) { 2727 buf->pos = b->planes[0].data_offset; 2728 buf->size -= buf->pos; 2729 } 2730 } else { 2731 buf = &fileio->bufs[index]; 2732 } 2733 2734 /* 2735 * Limit count on last few bytes of the buffer. 2736 */ 2737 if (buf->pos + count > buf->size) { 2738 count = buf->size - buf->pos; 2739 dprintk(q, 5, "reducing read count: %zd\n", count); 2740 } 2741 2742 /* 2743 * Transfer data to userspace. 2744 */ 2745 dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n", 2746 count, index, buf->pos); 2747 if (read) 2748 ret = copy_to_user(data, buf->vaddr + buf->pos, count); 2749 else 2750 ret = copy_from_user(buf->vaddr + buf->pos, data, count); 2751 if (ret) { 2752 dprintk(q, 3, "error copying data\n"); 2753 return -EFAULT; 2754 } 2755 2756 /* 2757 * Update counters. 2758 */ 2759 buf->pos += count; 2760 *ppos += count; 2761 2762 /* 2763 * Queue next buffer if required. 2764 */ 2765 if (buf->pos == buf->size || (!read && fileio->write_immediately)) { 2766 struct vb2_buffer *b = q->bufs[index]; 2767 2768 /* 2769 * Check if this is the last buffer to read. 2770 */ 2771 if (read && fileio->read_once && fileio->dq_count == 1) { 2772 dprintk(q, 3, "read limit reached\n"); 2773 return __vb2_cleanup_fileio(q); 2774 } 2775 2776 /* 2777 * Call vb2_qbuf and give buffer to the driver. 2778 */ 2779 b->planes[0].bytesused = buf->pos; 2780 2781 if (copy_timestamp) 2782 b->timestamp = ktime_get_ns(); 2783 ret = vb2_core_qbuf(q, index, NULL, NULL); 2784 dprintk(q, 5, "vb2_dbuf result: %d\n", ret); 2785 if (ret) 2786 return ret; 2787 2788 /* 2789 * Buffer has been queued, update the status 2790 */ 2791 buf->pos = 0; 2792 buf->queued = 1; 2793 buf->size = vb2_plane_size(q->bufs[index], 0); 2794 fileio->q_count += 1; 2795 /* 2796 * If we are queuing up buffers for the first time, then 2797 * increase initial_index by one. 2798 */ 2799 if (fileio->initial_index < q->num_buffers) 2800 fileio->initial_index++; 2801 /* 2802 * The next buffer to use is either a buffer that's going to be 2803 * queued for the first time (initial_index < q->num_buffers) 2804 * or it is equal to q->num_buffers, meaning that the next 2805 * time we need to dequeue a buffer since we've now queued up 2806 * all the 'first time' buffers. 2807 */ 2808 fileio->cur_index = fileio->initial_index; 2809 } 2810 2811 /* 2812 * Return proper number of bytes processed. 2813 */ 2814 if (ret == 0) 2815 ret = count; 2816 return ret; 2817 } 2818 2819 size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, 2820 loff_t *ppos, int nonblocking) 2821 { 2822 return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1); 2823 } 2824 EXPORT_SYMBOL_GPL(vb2_read); 2825 2826 size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, 2827 loff_t *ppos, int nonblocking) 2828 { 2829 return __vb2_perform_fileio(q, (char __user *) data, count, 2830 ppos, nonblocking, 0); 2831 } 2832 EXPORT_SYMBOL_GPL(vb2_write); 2833 2834 struct vb2_threadio_data { 2835 struct task_struct *thread; 2836 vb2_thread_fnc fnc; 2837 void *priv; 2838 bool stop; 2839 }; 2840 2841 static int vb2_thread(void *data) 2842 { 2843 struct vb2_queue *q = data; 2844 struct vb2_threadio_data *threadio = q->threadio; 2845 bool copy_timestamp = false; 2846 unsigned prequeue = 0; 2847 unsigned index = 0; 2848 int ret = 0; 2849 2850 if (q->is_output) { 2851 prequeue = q->num_buffers; 2852 copy_timestamp = q->copy_timestamp; 2853 } 2854 2855 set_freezable(); 2856 2857 for (;;) { 2858 struct vb2_buffer *vb; 2859 2860 /* 2861 * Call vb2_dqbuf to get buffer back. 2862 */ 2863 if (prequeue) { 2864 vb = q->bufs[index++]; 2865 prequeue--; 2866 } else { 2867 call_void_qop(q, wait_finish, q); 2868 if (!threadio->stop) 2869 ret = vb2_core_dqbuf(q, &index, NULL, 0); 2870 call_void_qop(q, wait_prepare, q); 2871 dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret); 2872 if (!ret) 2873 vb = q->bufs[index]; 2874 } 2875 if (ret || threadio->stop) 2876 break; 2877 try_to_freeze(); 2878 2879 if (vb->state != VB2_BUF_STATE_ERROR) 2880 if (threadio->fnc(vb, threadio->priv)) 2881 break; 2882 call_void_qop(q, wait_finish, q); 2883 if (copy_timestamp) 2884 vb->timestamp = ktime_get_ns(); 2885 if (!threadio->stop) 2886 ret = vb2_core_qbuf(q, vb->index, NULL, NULL); 2887 call_void_qop(q, wait_prepare, q); 2888 if (ret || threadio->stop) 2889 break; 2890 } 2891 2892 /* Hmm, linux becomes *very* unhappy without this ... */ 2893 while (!kthread_should_stop()) { 2894 set_current_state(TASK_INTERRUPTIBLE); 2895 schedule(); 2896 } 2897 return 0; 2898 } 2899 2900 /* 2901 * This function should not be used for anything else but the videobuf2-dvb 2902 * support. If you think you have another good use-case for this, then please 2903 * contact the linux-media mailinglist first. 2904 */ 2905 int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, 2906 const char *thread_name) 2907 { 2908 struct vb2_threadio_data *threadio; 2909 int ret = 0; 2910 2911 if (q->threadio) 2912 return -EBUSY; 2913 if (vb2_is_busy(q)) 2914 return -EBUSY; 2915 if (WARN_ON(q->fileio)) 2916 return -EBUSY; 2917 2918 threadio = kzalloc(sizeof(*threadio), GFP_KERNEL); 2919 if (threadio == NULL) 2920 return -ENOMEM; 2921 threadio->fnc = fnc; 2922 threadio->priv = priv; 2923 2924 ret = __vb2_init_fileio(q, !q->is_output); 2925 dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret); 2926 if (ret) 2927 goto nomem; 2928 q->threadio = threadio; 2929 threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name); 2930 if (IS_ERR(threadio->thread)) { 2931 ret = PTR_ERR(threadio->thread); 2932 threadio->thread = NULL; 2933 goto nothread; 2934 } 2935 return 0; 2936 2937 nothread: 2938 __vb2_cleanup_fileio(q); 2939 nomem: 2940 kfree(threadio); 2941 return ret; 2942 } 2943 EXPORT_SYMBOL_GPL(vb2_thread_start); 2944 2945 int vb2_thread_stop(struct vb2_queue *q) 2946 { 2947 struct vb2_threadio_data *threadio = q->threadio; 2948 int err; 2949 2950 if (threadio == NULL) 2951 return 0; 2952 threadio->stop = true; 2953 /* Wake up all pending sleeps in the thread */ 2954 vb2_queue_error(q); 2955 err = kthread_stop(threadio->thread); 2956 __vb2_cleanup_fileio(q); 2957 threadio->thread = NULL; 2958 kfree(threadio); 2959 q->threadio = NULL; 2960 return err; 2961 } 2962 EXPORT_SYMBOL_GPL(vb2_thread_stop); 2963 2964 MODULE_DESCRIPTION("Media buffer core framework"); 2965 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski"); 2966 MODULE_LICENSE("GPL"); 2967