/*
 * videobuf2-core.c - video buffer 2 core framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mc.h>

#include <trace/events/vb2.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(q, level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_info("[%s] %s: " fmt, (q)->name, __func__,	\
				## arg);				\
	} while (0)

#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

#define log_memop(vb, op)						\
	dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n",		\
		(vb)->index, #op,					\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

#define call_ptr_memop(op, vb, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL;	\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

#define log_qop(q, op)							\
	dprintk(q, 2, "call_qop(%s)%s\n", #op,				\
		(q)->ops->op ? "" : " (nop)")

#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

#define log_vb_qop(vb, op, args...)					\
	dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n",		\
		(vb)->index, #op,					\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(op, vb, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(vb, args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif
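/*
 * Illustrative note (editorial, not from the original source): with
 * CONFIG_VIDEO_ADV_DEBUG enabled, a wrapped call such as
 *
 *	ret = call_qop(q, queue_setup, q, &num_buffers,
 *		       &num_planes, plane_sizes, q->alloc_devs);
 *
 * logs the invocation via log_qop() and bumps q->cnt_queue_setup on
 * success, so init/cleanup style pairs can be checked for balance at
 * queue teardown. In non-debug builds the same macro reduces to the
 * plain conditional call with no bookkeeping.
 */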
#define call_bufop(q, op, args...)					\
({									\
	int ret = 0;							\
	if (q && q->buf_ops && q->buf_ops->op)				\
		ret = q->buf_ops->op(args);				\
	ret;								\
})

#define call_void_bufop(q, op, args...)					\
({									\
	if (q && q->buf_ops && q->buf_ops->op)				\
		q->buf_ops->op(args);					\
})

static void __vb2_queue_cancel(struct vb2_queue *q);
static void __enqueue_in_driver(struct vb2_buffer *vb);

static const char *vb2_state_name(enum vb2_buffer_state s)
{
	static const char * const state_names[] = {
		[VB2_BUF_STATE_DEQUEUED] = "dequeued",
		[VB2_BUF_STATE_IN_REQUEST] = "in request",
		[VB2_BUF_STATE_PREPARING] = "preparing",
		[VB2_BUF_STATE_QUEUED] = "queued",
		[VB2_BUF_STATE_ACTIVE] = "active",
		[VB2_BUF_STATE_DONE] = "done",
		[VB2_BUF_STATE_ERROR] = "error",
	};

	if ((unsigned int)(s) < ARRAY_SIZE(state_names))
		return state_names[s];
	return "unknown";
}

/*
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;
	int ret = -ENOMEM;

	/*
	 * Allocate memory for all planes in this buffer.
	 * NOTE: mmapped areas should be page aligned.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Memops alloc requires size to be page aligned. */
		unsigned long size = PAGE_ALIGN(vb->planes[plane].length);

		/* Did it wrap around? */
		if (size < vb->planes[plane].length)
			goto free;

		mem_priv = call_ptr_memop(alloc,
					  vb,
					  q->alloc_devs[plane] ? : q->dev,
					  size);
		if (IS_ERR_OR_NULL(mem_priv)) {
			if (mem_priv)
				ret = PTR_ERR(mem_priv);
			goto free;
		}

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return ret;
}
/*
 * __vb2_buf_mem_free() - free memory of the given buffer
 */
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		call_void_memop(vb, put, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n",
			plane, vb->index);
	}
}

/*
 * __vb2_buf_userptr_put() - release userspace memory associated with
 * a USERPTR buffer
 */
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
	}
}

/*
 * __vb2_plane_dmabuf_put() - release memory associated with
 * a DMABUF shared plane
 */
static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
{
	if (!p->mem_priv)
		return;

	if (p->dbuf_mapped)
		call_void_memop(vb, unmap_dmabuf, p->mem_priv);

	call_void_memop(vb, detach_dmabuf, p->mem_priv);
	dma_buf_put(p->dbuf);
	p->mem_priv = NULL;
	p->dbuf = NULL;
	p->dbuf_mapped = 0;
}

/*
 * __vb2_buf_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane)
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}

/*
 * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory
 * to sync caches
 */
static void __vb2_buf_mem_prepare(struct vb2_buffer *vb)
{
	unsigned int plane;

	if (vb->synced)
		return;

	vb->synced = 1;
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
}

/*
 * __vb2_buf_mem_finish() - call ->finish on buffer's private memory
 * to sync caches
 */
static void __vb2_buf_mem_finish(struct vb2_buffer *vb)
{
	unsigned int plane;

	if (!vb->synced)
		return;

	vb->synced = 0;
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, finish, vb->planes[plane].mem_priv);
}

/*
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * the buffer.
 */
static void __setup_offsets(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;
	unsigned long off = 0;

	if (vb->index) {
		struct vb2_buffer *prev = q->bufs[vb->index - 1];
		struct vb2_plane *p = &prev->planes[prev->num_planes - 1];

		off = PAGE_ALIGN(p->m.offset + p->length);
	}

	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].m.offset = off;

		dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n",
			vb->index, plane, off);

		off += vb->planes[plane].length;
		off = PAGE_ALIGN(off);
	}
}
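/*
 * Worked example of the offset scheme above (hypothetical sizes,
 * assuming PAGE_SIZE == 4096): two buffers with planes of 5000 and
 * 3000 bytes get the cookies
 *
 *	buffer 0, plane 0: 0x0000 (5000 bytes -> next offset 0x2000)
 *	buffer 0, plane 1: 0x2000 (3000 bytes -> next offset 0x3000)
 *	buffer 1, plane 0: 0x3000
 *	buffer 1, plane 1: 0x5000
 *
 * Userspace hands these page-aligned offsets back through mmap() and
 * __find_plane_by_offset() resolves them to a buffer/plane pair.
 */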
static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
{
	/*
	 * DMA exporter should take care of cache syncs, so we can avoid
	 * explicit ->prepare()/->finish() syncs. For other ->memory types
	 * we always need ->prepare() or/and ->finish() cache sync.
	 */
	if (q->memory == VB2_MEMORY_DMABUF) {
		vb->skip_cache_sync_on_finish = 1;
		vb->skip_cache_sync_on_prepare = 1;
		return;
	}

	/*
	 * ->finish() cache sync can be avoided when queue direction is
	 * TO_DEVICE.
	 */
	if (q->dma_dir == DMA_TO_DEVICE)
		vb->skip_cache_sync_on_finish = 1;
}

/*
 * __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initialize the
 * queue
 *
 * Returns the number of buffers successfully allocated.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes,
			     const unsigned plane_sizes[VB2_MAX_PLANES])
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	int ret;

	/* Ensure that q->num_buffers + num_buffers is below VB2_MAX_FRAME */
	num_buffers = min_t(unsigned int, num_buffers,
			    VB2_MAX_FRAME - q->num_buffers);

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate vb2 buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(q, 1, "memory alloc for buffer struct failed\n");
			break;
		}

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->index = q->num_buffers + buffer;
		vb->type = q->type;
		vb->memory = memory;
		init_buffer_cache_hints(q, vb);
		for (plane = 0; plane < num_planes; ++plane) {
			vb->planes[plane].length = plane_sizes[plane];
			vb->planes[plane].min_length = plane_sizes[plane];
		}
		call_void_bufop(q, init_buffer, vb);

		q->bufs[vb->index] = vb;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == VB2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(q, 1, "failed allocating memory for buffer %d\n",
					buffer);
				q->bufs[vb->index] = NULL;
				kfree(vb);
				break;
			}
			__setup_offsets(vb);
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(q, 1, "buffer %d %p initialization failed\n",
					buffer, vb);
				__vb2_buf_mem_free(vb);
				q->bufs[vb->index] = NULL;
				kfree(vb);
				break;
			}
		}
	}

	dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n",
		buffer, num_planes);

	return buffer;
}
/*
 * __vb2_free_mem() - release all video buffer memory for a given queue
 */
static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;
	struct vb2_buffer *vb;

	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		/* Free MMAP buffers or release USERPTR buffers */
		if (q->memory == VB2_MEMORY_MMAP)
			__vb2_buf_mem_free(vb);
		else if (q->memory == VB2_MEMORY_DMABUF)
			__vb2_buf_dmabuf_put(vb);
		else
			__vb2_buf_userptr_put(vb);
	}
}

/*
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information, if no buffers are left return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 */
static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/*
	 * Sanity check: when preparing a buffer the queue lock is released for
	 * a short while (see __buf_prepare for the details), which would allow
	 * a race with a reqbufs which can call this function. Removing the
	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
	 * check if any of the buffers is in the state PREPARING, and if so we
	 * just return -EAGAIN.
	 */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		if (q->bufs[buffer] == NULL)
			continue;
		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
			dprintk(q, 1, "preparing buffers, cannot free\n");
			return -EAGAIN;
		}
	}

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];

		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the life-time of this
	 * queue. If not (or if the debug level is 1 or up), then dump the
	 * counters to the kernel log.
	 */
	if (q->num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_prepare_streaming != q->cnt_unprepare_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced || debug) {
			pr_info("counters for queue %p:%s\n", q,
				unbalanced ? " UNBALANCED!" : "");
			pr_info("     setup: %u start_streaming: %u stop_streaming: %u\n",
				q->cnt_queue_setup, q->cnt_start_streaming,
				q->cnt_stop_streaming);
			pr_info("     prepare_streaming: %u unprepare_streaming: %u\n",
				q->cnt_prepare_streaming, q->cnt_unprepare_streaming);
			pr_info("     wait_prepare: %u wait_finish: %u\n",
				q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_prepare_streaming = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
		q->cnt_unprepare_streaming = 0;
	}
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];
		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
				  vb->cnt_buf_queue != vb->cnt_buf_done ||
				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
				  vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced || debug) {
			pr_info("   counters for queue %p, buffer %d:%s\n",
				q, buffer, unbalanced ? " UNBALANCED!" : "");
			pr_info("     buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
				vb->cnt_buf_init, vb->cnt_buf_cleanup,
				vb->cnt_buf_prepare, vb->cnt_buf_finish);
			pr_info("     buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
				vb->cnt_buf_out_validate, vb->cnt_buf_queue,
				vb->cnt_buf_done, vb->cnt_buf_request_complete);
			pr_info("     alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
				vb->cnt_mem_alloc, vb->cnt_mem_put,
				vb->cnt_mem_prepare, vb->cnt_mem_finish,
				vb->cnt_mem_mmap);
			pr_info("     get_userptr: %u put_userptr: %u\n",
				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			pr_info("     attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("     get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users,
				vb->cnt_mem_vaddr,
				vb->cnt_mem_cookie);
		}
	}
#endif

	/* Free vb2 buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	if (!q->num_buffers) {
		q->memory = VB2_MEMORY_UNKNOWN;
		INIT_LIST_HEAD(&q->queued_list);
	}
	return 0;
}
: ""); 553 pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n", 554 q->cnt_queue_setup, q->cnt_start_streaming, 555 q->cnt_stop_streaming); 556 pr_info(" prepare_streaming: %u unprepare_streaming: %u\n", 557 q->cnt_prepare_streaming, q->cnt_unprepare_streaming); 558 pr_info(" wait_prepare: %u wait_finish: %u\n", 559 q->cnt_wait_prepare, q->cnt_wait_finish); 560 } 561 q->cnt_queue_setup = 0; 562 q->cnt_wait_prepare = 0; 563 q->cnt_wait_finish = 0; 564 q->cnt_prepare_streaming = 0; 565 q->cnt_start_streaming = 0; 566 q->cnt_stop_streaming = 0; 567 q->cnt_unprepare_streaming = 0; 568 } 569 for (buffer = 0; buffer < q->num_buffers; ++buffer) { 570 struct vb2_buffer *vb = q->bufs[buffer]; 571 bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put || 572 vb->cnt_mem_prepare != vb->cnt_mem_finish || 573 vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr || 574 vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf || 575 vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf || 576 vb->cnt_buf_queue != vb->cnt_buf_done || 577 vb->cnt_buf_prepare != vb->cnt_buf_finish || 578 vb->cnt_buf_init != vb->cnt_buf_cleanup; 579 580 if (unbalanced || debug) { 581 pr_info(" counters for queue %p, buffer %d:%s\n", 582 q, buffer, unbalanced ? " UNBALANCED!" : ""); 583 pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n", 584 vb->cnt_buf_init, vb->cnt_buf_cleanup, 585 vb->cnt_buf_prepare, vb->cnt_buf_finish); 586 pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n", 587 vb->cnt_buf_out_validate, vb->cnt_buf_queue, 588 vb->cnt_buf_done, vb->cnt_buf_request_complete); 589 pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n", 590 vb->cnt_mem_alloc, vb->cnt_mem_put, 591 vb->cnt_mem_prepare, vb->cnt_mem_finish, 592 vb->cnt_mem_mmap); 593 pr_info(" get_userptr: %u put_userptr: %u\n", 594 vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr); 595 pr_info(" attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n", 596 vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf, 597 vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf); 598 pr_info(" get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n", 599 vb->cnt_mem_get_dmabuf, 600 vb->cnt_mem_num_users, 601 vb->cnt_mem_vaddr, 602 vb->cnt_mem_cookie); 603 } 604 } 605 #endif 606 607 /* Free vb2 buffers */ 608 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; 609 ++buffer) { 610 kfree(q->bufs[buffer]); 611 q->bufs[buffer] = NULL; 612 } 613 614 q->num_buffers -= buffers; 615 if (!q->num_buffers) { 616 q->memory = VB2_MEMORY_UNKNOWN; 617 INIT_LIST_HEAD(&q->queued_list); 618 } 619 return 0; 620 } 621 622 bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb) 623 { 624 unsigned int plane; 625 for (plane = 0; plane < vb->num_planes; ++plane) { 626 void *mem_priv = vb->planes[plane].mem_priv; 627 /* 628 * If num_users() has not been provided, call_memop 629 * will return 0, apparently nobody cares about this 630 * case anyway. If num_users() returns more than 1, 631 * we are not the only user of the plane's memory. 
632 */ 633 if (mem_priv && call_memop(vb, num_users, mem_priv) > 1) 634 return true; 635 } 636 return false; 637 } 638 EXPORT_SYMBOL(vb2_buffer_in_use); 639 640 /* 641 * __buffers_in_use() - return true if any buffers on the queue are in use and 642 * the queue cannot be freed (by the means of REQBUFS(0)) call 643 */ 644 static bool __buffers_in_use(struct vb2_queue *q) 645 { 646 unsigned int buffer; 647 for (buffer = 0; buffer < q->num_buffers; ++buffer) { 648 if (vb2_buffer_in_use(q, q->bufs[buffer])) 649 return true; 650 } 651 return false; 652 } 653 654 void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb) 655 { 656 call_void_bufop(q, fill_user_buffer, q->bufs[index], pb); 657 } 658 EXPORT_SYMBOL_GPL(vb2_core_querybuf); 659 660 /* 661 * __verify_userptr_ops() - verify that all memory operations required for 662 * USERPTR queue type have been provided 663 */ 664 static int __verify_userptr_ops(struct vb2_queue *q) 665 { 666 if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr || 667 !q->mem_ops->put_userptr) 668 return -EINVAL; 669 670 return 0; 671 } 672 673 /* 674 * __verify_mmap_ops() - verify that all memory operations required for 675 * MMAP queue type have been provided 676 */ 677 static int __verify_mmap_ops(struct vb2_queue *q) 678 { 679 if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc || 680 !q->mem_ops->put || !q->mem_ops->mmap) 681 return -EINVAL; 682 683 return 0; 684 } 685 686 /* 687 * __verify_dmabuf_ops() - verify that all memory operations required for 688 * DMABUF queue type have been provided 689 */ 690 static int __verify_dmabuf_ops(struct vb2_queue *q) 691 { 692 if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf || 693 !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf || 694 !q->mem_ops->unmap_dmabuf) 695 return -EINVAL; 696 697 return 0; 698 } 699 700 int vb2_verify_memory_type(struct vb2_queue *q, 701 enum vb2_memory memory, unsigned int type) 702 { 703 if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR && 704 memory != VB2_MEMORY_DMABUF) { 705 dprintk(q, 1, "unsupported memory type\n"); 706 return -EINVAL; 707 } 708 709 if (type != q->type) { 710 dprintk(q, 1, "requested type is incorrect\n"); 711 return -EINVAL; 712 } 713 714 /* 715 * Make sure all the required memory ops for given memory type 716 * are available. 717 */ 718 if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) { 719 dprintk(q, 1, "MMAP for current setup unsupported\n"); 720 return -EINVAL; 721 } 722 723 if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) { 724 dprintk(q, 1, "USERPTR for current setup unsupported\n"); 725 return -EINVAL; 726 } 727 728 if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { 729 dprintk(q, 1, "DMABUF for current setup unsupported\n"); 730 return -EINVAL; 731 } 732 733 /* 734 * Place the busy tests at the end: -EBUSY can be ignored when 735 * create_bufs is called with count == 0, but count == 0 should still 736 * do the memory and type validation. 
737 */ 738 if (vb2_fileio_is_active(q)) { 739 dprintk(q, 1, "file io in progress\n"); 740 return -EBUSY; 741 } 742 return 0; 743 } 744 EXPORT_SYMBOL(vb2_verify_memory_type); 745 746 static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem) 747 { 748 q->non_coherent_mem = 0; 749 750 if (!vb2_queue_allows_cache_hints(q)) 751 return; 752 q->non_coherent_mem = non_coherent_mem; 753 } 754 755 static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem) 756 { 757 if (non_coherent_mem != q->non_coherent_mem) { 758 dprintk(q, 1, "memory coherency model mismatch\n"); 759 return false; 760 } 761 return true; 762 } 763 764 int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, 765 unsigned int flags, unsigned int *count) 766 { 767 unsigned int num_buffers, allocated_buffers, num_planes = 0; 768 unsigned plane_sizes[VB2_MAX_PLANES] = { }; 769 bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT; 770 unsigned int i; 771 int ret; 772 773 if (q->streaming) { 774 dprintk(q, 1, "streaming active\n"); 775 return -EBUSY; 776 } 777 778 if (q->waiting_in_dqbuf && *count) { 779 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); 780 return -EBUSY; 781 } 782 783 if (*count == 0 || q->num_buffers != 0 || 784 (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) || 785 !verify_coherency_flags(q, non_coherent_mem)) { 786 /* 787 * We already have buffers allocated, so first check if they 788 * are not in use and can be freed. 789 */ 790 mutex_lock(&q->mmap_lock); 791 if (debug && q->memory == VB2_MEMORY_MMAP && 792 __buffers_in_use(q)) 793 dprintk(q, 1, "memory in use, orphaning buffers\n"); 794 795 /* 796 * Call queue_cancel to clean up any buffers in the 797 * QUEUED state which is possible if buffers were prepared or 798 * queued without ever calling STREAMON. 799 */ 800 __vb2_queue_cancel(q); 801 ret = __vb2_queue_free(q, q->num_buffers); 802 mutex_unlock(&q->mmap_lock); 803 if (ret) 804 return ret; 805 806 /* 807 * In case of REQBUFS(0) return immediately without calling 808 * driver's queue_setup() callback and allocating resources. 809 */ 810 if (*count == 0) 811 return 0; 812 } 813 814 /* 815 * Make sure the requested values and current defaults are sane. 816 */ 817 WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME); 818 num_buffers = max_t(unsigned int, *count, q->min_buffers_needed); 819 num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME); 820 memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); 821 /* 822 * Set this now to ensure that drivers see the correct q->memory value 823 * in the queue_setup op. 824 */ 825 mutex_lock(&q->mmap_lock); 826 q->memory = memory; 827 mutex_unlock(&q->mmap_lock); 828 set_queue_coherency(q, non_coherent_mem); 829 830 /* 831 * Ask the driver how many buffers and planes per buffer it requires. 832 * Driver also sets the size and allocator context for each plane. 
833 */ 834 ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, 835 plane_sizes, q->alloc_devs); 836 if (ret) 837 goto error; 838 839 /* Check that driver has set sane values */ 840 if (WARN_ON(!num_planes)) { 841 ret = -EINVAL; 842 goto error; 843 } 844 845 for (i = 0; i < num_planes; i++) 846 if (WARN_ON(!plane_sizes[i])) { 847 ret = -EINVAL; 848 goto error; 849 } 850 851 /* Finally, allocate buffers and video memory */ 852 allocated_buffers = 853 __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes); 854 if (allocated_buffers == 0) { 855 dprintk(q, 1, "memory allocation failed\n"); 856 ret = -ENOMEM; 857 goto error; 858 } 859 860 /* 861 * There is no point in continuing if we can't allocate the minimum 862 * number of buffers needed by this vb2_queue. 863 */ 864 if (allocated_buffers < q->min_buffers_needed) 865 ret = -ENOMEM; 866 867 /* 868 * Check if driver can handle the allocated number of buffers. 869 */ 870 if (!ret && allocated_buffers < num_buffers) { 871 num_buffers = allocated_buffers; 872 /* 873 * num_planes is set by the previous queue_setup(), but since it 874 * signals to queue_setup() whether it is called from create_bufs() 875 * vs reqbufs() we zero it here to signal that queue_setup() is 876 * called for the reqbufs() case. 877 */ 878 num_planes = 0; 879 880 ret = call_qop(q, queue_setup, q, &num_buffers, 881 &num_planes, plane_sizes, q->alloc_devs); 882 883 if (!ret && allocated_buffers < num_buffers) 884 ret = -ENOMEM; 885 886 /* 887 * Either the driver has accepted a smaller number of buffers, 888 * or .queue_setup() returned an error 889 */ 890 } 891 892 mutex_lock(&q->mmap_lock); 893 q->num_buffers = allocated_buffers; 894 895 if (ret < 0) { 896 /* 897 * Note: __vb2_queue_free() will subtract 'allocated_buffers' 898 * from q->num_buffers and it will reset q->memory to 899 * VB2_MEMORY_UNKNOWN. 900 */ 901 __vb2_queue_free(q, allocated_buffers); 902 mutex_unlock(&q->mmap_lock); 903 return ret; 904 } 905 mutex_unlock(&q->mmap_lock); 906 907 /* 908 * Return the number of successfully allocated buffers 909 * to the userspace. 910 */ 911 *count = allocated_buffers; 912 q->waiting_for_buffers = !q->is_output; 913 914 return 0; 915 916 error: 917 mutex_lock(&q->mmap_lock); 918 q->memory = VB2_MEMORY_UNKNOWN; 919 mutex_unlock(&q->mmap_lock); 920 return ret; 921 } 922 EXPORT_SYMBOL_GPL(vb2_core_reqbufs); 923 924 int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, 925 unsigned int flags, unsigned int *count, 926 unsigned int requested_planes, 927 const unsigned int requested_sizes[]) 928 { 929 unsigned int num_planes = 0, num_buffers, allocated_buffers; 930 unsigned plane_sizes[VB2_MAX_PLANES] = { }; 931 bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT; 932 bool no_previous_buffers = !q->num_buffers; 933 int ret; 934 935 if (q->num_buffers == VB2_MAX_FRAME) { 936 dprintk(q, 1, "maximum number of buffers already allocated\n"); 937 return -ENOBUFS; 938 } 939 940 if (no_previous_buffers) { 941 if (q->waiting_in_dqbuf && *count) { 942 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); 943 return -EBUSY; 944 } 945 memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); 946 /* 947 * Set this now to ensure that drivers see the correct q->memory 948 * value in the queue_setup op. 
949 */ 950 mutex_lock(&q->mmap_lock); 951 q->memory = memory; 952 mutex_unlock(&q->mmap_lock); 953 q->waiting_for_buffers = !q->is_output; 954 set_queue_coherency(q, non_coherent_mem); 955 } else { 956 if (q->memory != memory) { 957 dprintk(q, 1, "memory model mismatch\n"); 958 return -EINVAL; 959 } 960 if (!verify_coherency_flags(q, non_coherent_mem)) 961 return -EINVAL; 962 } 963 964 num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers); 965 966 if (requested_planes && requested_sizes) { 967 num_planes = requested_planes; 968 memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes)); 969 } 970 971 /* 972 * Ask the driver, whether the requested number of buffers, planes per 973 * buffer and their sizes are acceptable 974 */ 975 ret = call_qop(q, queue_setup, q, &num_buffers, 976 &num_planes, plane_sizes, q->alloc_devs); 977 if (ret) 978 goto error; 979 980 /* Finally, allocate buffers and video memory */ 981 allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, 982 num_planes, plane_sizes); 983 if (allocated_buffers == 0) { 984 dprintk(q, 1, "memory allocation failed\n"); 985 ret = -ENOMEM; 986 goto error; 987 } 988 989 /* 990 * Check if driver can handle the so far allocated number of buffers. 991 */ 992 if (allocated_buffers < num_buffers) { 993 num_buffers = allocated_buffers; 994 995 /* 996 * q->num_buffers contains the total number of buffers, that the 997 * queue driver has set up 998 */ 999 ret = call_qop(q, queue_setup, q, &num_buffers, 1000 &num_planes, plane_sizes, q->alloc_devs); 1001 1002 if (!ret && allocated_buffers < num_buffers) 1003 ret = -ENOMEM; 1004 1005 /* 1006 * Either the driver has accepted a smaller number of buffers, 1007 * or .queue_setup() returned an error 1008 */ 1009 } 1010 1011 mutex_lock(&q->mmap_lock); 1012 q->num_buffers += allocated_buffers; 1013 1014 if (ret < 0) { 1015 /* 1016 * Note: __vb2_queue_free() will subtract 'allocated_buffers' 1017 * from q->num_buffers and it will reset q->memory to 1018 * VB2_MEMORY_UNKNOWN. 1019 */ 1020 __vb2_queue_free(q, allocated_buffers); 1021 mutex_unlock(&q->mmap_lock); 1022 return -ENOMEM; 1023 } 1024 mutex_unlock(&q->mmap_lock); 1025 1026 /* 1027 * Return the number of successfully allocated buffers 1028 * to the userspace. 
1029 */ 1030 *count = allocated_buffers; 1031 1032 return 0; 1033 1034 error: 1035 if (no_previous_buffers) { 1036 mutex_lock(&q->mmap_lock); 1037 q->memory = VB2_MEMORY_UNKNOWN; 1038 mutex_unlock(&q->mmap_lock); 1039 } 1040 return ret; 1041 } 1042 EXPORT_SYMBOL_GPL(vb2_core_create_bufs); 1043 1044 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) 1045 { 1046 if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) 1047 return NULL; 1048 1049 return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv); 1050 1051 } 1052 EXPORT_SYMBOL_GPL(vb2_plane_vaddr); 1053 1054 void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) 1055 { 1056 if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) 1057 return NULL; 1058 1059 return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv); 1060 } 1061 EXPORT_SYMBOL_GPL(vb2_plane_cookie); 1062 1063 void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) 1064 { 1065 struct vb2_queue *q = vb->vb2_queue; 1066 unsigned long flags; 1067 1068 if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE)) 1069 return; 1070 1071 if (WARN_ON(state != VB2_BUF_STATE_DONE && 1072 state != VB2_BUF_STATE_ERROR && 1073 state != VB2_BUF_STATE_QUEUED)) 1074 state = VB2_BUF_STATE_ERROR; 1075 1076 #ifdef CONFIG_VIDEO_ADV_DEBUG 1077 /* 1078 * Although this is not a callback, it still does have to balance 1079 * with the buf_queue op. So update this counter manually. 1080 */ 1081 vb->cnt_buf_done++; 1082 #endif 1083 dprintk(q, 4, "done processing on buffer %d, state: %s\n", 1084 vb->index, vb2_state_name(state)); 1085 1086 if (state != VB2_BUF_STATE_QUEUED) 1087 __vb2_buf_mem_finish(vb); 1088 1089 spin_lock_irqsave(&q->done_lock, flags); 1090 if (state == VB2_BUF_STATE_QUEUED) { 1091 vb->state = VB2_BUF_STATE_QUEUED; 1092 } else { 1093 /* Add the buffer to the done buffers list */ 1094 list_add_tail(&vb->done_entry, &q->done_list); 1095 vb->state = state; 1096 } 1097 atomic_dec(&q->owned_by_drv_count); 1098 1099 if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) { 1100 media_request_object_unbind(&vb->req_obj); 1101 media_request_object_put(&vb->req_obj); 1102 } 1103 1104 spin_unlock_irqrestore(&q->done_lock, flags); 1105 1106 trace_vb2_buf_done(q, vb); 1107 1108 switch (state) { 1109 case VB2_BUF_STATE_QUEUED: 1110 return; 1111 default: 1112 /* Inform any processes that may be waiting for buffers */ 1113 wake_up(&q->done_wq); 1114 break; 1115 } 1116 } 1117 EXPORT_SYMBOL_GPL(vb2_buffer_done); 1118 1119 void vb2_discard_done(struct vb2_queue *q) 1120 { 1121 struct vb2_buffer *vb; 1122 unsigned long flags; 1123 1124 spin_lock_irqsave(&q->done_lock, flags); 1125 list_for_each_entry(vb, &q->done_list, done_entry) 1126 vb->state = VB2_BUF_STATE_ERROR; 1127 spin_unlock_irqrestore(&q->done_lock, flags); 1128 } 1129 EXPORT_SYMBOL_GPL(vb2_discard_done); 1130 1131 /* 1132 * __prepare_mmap() - prepare an MMAP buffer 1133 */ 1134 static int __prepare_mmap(struct vb2_buffer *vb) 1135 { 1136 int ret = 0; 1137 1138 ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1139 vb, vb->planes); 1140 return ret ? 
void vb2_discard_done(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	unsigned long flags;

	spin_lock_irqsave(&q->done_lock, flags);
	list_for_each_entry(vb, &q->done_list, done_entry)
		vb->state = VB2_BUF_STATE_ERROR;
	spin_unlock_irqrestore(&q->done_lock, flags);
}
EXPORT_SYMBOL_GPL(vb2_discard_done);

/*
 * __prepare_mmap() - prepare an MMAP buffer
 */
static int __prepare_mmap(struct vb2_buffer *vb)
{
	int ret = 0;

	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, vb->planes);
	return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
}

/*
 * __prepare_userptr() - prepare a USERPTR buffer
 */
static int __prepare_userptr(struct vb2_buffer *vb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->planes[plane].m.userptr &&
		    vb->planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->planes[plane].length == planes[plane].length)
			continue;

		dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n",
			plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n",
				planes[plane].length,
				vb->planes[plane].min_length,
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
				vb->copied_timestamp = 0;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(get_userptr,
					  vb,
					  q->alloc_devs[plane] ? : q->dev,
					  planes[plane].m.userptr,
					  planes[plane].length);
		if (IS_ERR(mem_priv)) {
			dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
				plane);
			ret = PTR_ERR(mem_priv);
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.userptr = planes[plane].m.userptr;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(q, 1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(q, 1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr,
					vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].length = 0;
	}

	return ret;
}
1229 */ 1230 ret = call_vb_qop(vb, buf_init, vb); 1231 if (ret) { 1232 dprintk(q, 1, "buffer initialization failed\n"); 1233 goto err; 1234 } 1235 } 1236 1237 ret = call_vb_qop(vb, buf_prepare, vb); 1238 if (ret) { 1239 dprintk(q, 1, "buffer preparation failed\n"); 1240 call_void_vb_qop(vb, buf_cleanup, vb); 1241 goto err; 1242 } 1243 1244 return 0; 1245 err: 1246 /* In case of errors, release planes that were already acquired */ 1247 for (plane = 0; plane < vb->num_planes; ++plane) { 1248 if (vb->planes[plane].mem_priv) 1249 call_void_memop(vb, put_userptr, 1250 vb->planes[plane].mem_priv); 1251 vb->planes[plane].mem_priv = NULL; 1252 vb->planes[plane].m.userptr = 0; 1253 vb->planes[plane].length = 0; 1254 } 1255 1256 return ret; 1257 } 1258 1259 /* 1260 * __prepare_dmabuf() - prepare a DMABUF buffer 1261 */ 1262 static int __prepare_dmabuf(struct vb2_buffer *vb) 1263 { 1264 struct vb2_plane planes[VB2_MAX_PLANES]; 1265 struct vb2_queue *q = vb->vb2_queue; 1266 void *mem_priv; 1267 unsigned int plane; 1268 int ret = 0; 1269 bool reacquired = vb->planes[0].mem_priv == NULL; 1270 1271 memset(planes, 0, sizeof(planes[0]) * vb->num_planes); 1272 /* Copy relevant information provided by the userspace */ 1273 ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1274 vb, planes); 1275 if (ret) 1276 return ret; 1277 1278 for (plane = 0; plane < vb->num_planes; ++plane) { 1279 struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); 1280 1281 if (IS_ERR_OR_NULL(dbuf)) { 1282 dprintk(q, 1, "invalid dmabuf fd for plane %d\n", 1283 plane); 1284 ret = -EINVAL; 1285 goto err; 1286 } 1287 1288 /* use DMABUF size if length is not provided */ 1289 if (planes[plane].length == 0) 1290 planes[plane].length = dbuf->size; 1291 1292 if (planes[plane].length < vb->planes[plane].min_length) { 1293 dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n", 1294 planes[plane].length, plane, 1295 vb->planes[plane].min_length); 1296 dma_buf_put(dbuf); 1297 ret = -EINVAL; 1298 goto err; 1299 } 1300 1301 /* Skip the plane if already verified */ 1302 if (dbuf == vb->planes[plane].dbuf && 1303 vb->planes[plane].length == planes[plane].length) { 1304 dma_buf_put(dbuf); 1305 continue; 1306 } 1307 1308 dprintk(q, 3, "buffer for plane %d changed\n", plane); 1309 1310 if (!reacquired) { 1311 reacquired = true; 1312 vb->copied_timestamp = 0; 1313 call_void_vb_qop(vb, buf_cleanup, vb); 1314 } 1315 1316 /* Release previously acquired memory if present */ 1317 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); 1318 vb->planes[plane].bytesused = 0; 1319 vb->planes[plane].length = 0; 1320 vb->planes[plane].m.fd = 0; 1321 vb->planes[plane].data_offset = 0; 1322 1323 /* Acquire each plane's memory */ 1324 mem_priv = call_ptr_memop(attach_dmabuf, 1325 vb, 1326 q->alloc_devs[plane] ? : q->dev, 1327 dbuf, 1328 planes[plane].length); 1329 if (IS_ERR(mem_priv)) { 1330 dprintk(q, 1, "failed to attach dmabuf\n"); 1331 ret = PTR_ERR(mem_priv); 1332 dma_buf_put(dbuf); 1333 goto err; 1334 } 1335 1336 vb->planes[plane].dbuf = dbuf; 1337 vb->planes[plane].mem_priv = mem_priv; 1338 } 1339 1340 /* 1341 * This pins the buffer(s) with dma_buf_map_attachment()). It's done 1342 * here instead just before the DMA, while queueing the buffer(s) so 1343 * userspace knows sooner rather than later if the dma-buf map fails. 
1344 */ 1345 for (plane = 0; plane < vb->num_planes; ++plane) { 1346 if (vb->planes[plane].dbuf_mapped) 1347 continue; 1348 1349 ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv); 1350 if (ret) { 1351 dprintk(q, 1, "failed to map dmabuf for plane %d\n", 1352 plane); 1353 goto err; 1354 } 1355 vb->planes[plane].dbuf_mapped = 1; 1356 } 1357 1358 /* 1359 * Now that everything is in order, copy relevant information 1360 * provided by userspace. 1361 */ 1362 for (plane = 0; plane < vb->num_planes; ++plane) { 1363 vb->planes[plane].bytesused = planes[plane].bytesused; 1364 vb->planes[plane].length = planes[plane].length; 1365 vb->planes[plane].m.fd = planes[plane].m.fd; 1366 vb->planes[plane].data_offset = planes[plane].data_offset; 1367 } 1368 1369 if (reacquired) { 1370 /* 1371 * Call driver-specific initialization on the newly acquired buffer, 1372 * if provided. 1373 */ 1374 ret = call_vb_qop(vb, buf_init, vb); 1375 if (ret) { 1376 dprintk(q, 1, "buffer initialization failed\n"); 1377 goto err; 1378 } 1379 } 1380 1381 ret = call_vb_qop(vb, buf_prepare, vb); 1382 if (ret) { 1383 dprintk(q, 1, "buffer preparation failed\n"); 1384 call_void_vb_qop(vb, buf_cleanup, vb); 1385 goto err; 1386 } 1387 1388 return 0; 1389 err: 1390 /* In case of errors, release planes that were already acquired */ 1391 __vb2_buf_dmabuf_put(vb); 1392 1393 return ret; 1394 } 1395 1396 /* 1397 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing 1398 */ 1399 static void __enqueue_in_driver(struct vb2_buffer *vb) 1400 { 1401 struct vb2_queue *q = vb->vb2_queue; 1402 1403 vb->state = VB2_BUF_STATE_ACTIVE; 1404 atomic_inc(&q->owned_by_drv_count); 1405 1406 trace_vb2_buf_queue(q, vb); 1407 1408 call_void_vb_qop(vb, buf_queue, vb); 1409 } 1410 1411 static int __buf_prepare(struct vb2_buffer *vb) 1412 { 1413 struct vb2_queue *q = vb->vb2_queue; 1414 enum vb2_buffer_state orig_state = vb->state; 1415 int ret; 1416 1417 if (q->error) { 1418 dprintk(q, 1, "fatal error occurred on queue\n"); 1419 return -EIO; 1420 } 1421 1422 if (vb->prepared) 1423 return 0; 1424 WARN_ON(vb->synced); 1425 1426 if (q->is_output) { 1427 ret = call_vb_qop(vb, buf_out_validate, vb); 1428 if (ret) { 1429 dprintk(q, 1, "buffer validation failed\n"); 1430 return ret; 1431 } 1432 } 1433 1434 vb->state = VB2_BUF_STATE_PREPARING; 1435 1436 switch (q->memory) { 1437 case VB2_MEMORY_MMAP: 1438 ret = __prepare_mmap(vb); 1439 break; 1440 case VB2_MEMORY_USERPTR: 1441 ret = __prepare_userptr(vb); 1442 break; 1443 case VB2_MEMORY_DMABUF: 1444 ret = __prepare_dmabuf(vb); 1445 break; 1446 default: 1447 WARN(1, "Invalid queue type\n"); 1448 ret = -EINVAL; 1449 break; 1450 } 1451 1452 if (ret) { 1453 dprintk(q, 1, "buffer preparation failed: %d\n", ret); 1454 vb->state = orig_state; 1455 return ret; 1456 } 1457 1458 __vb2_buf_mem_prepare(vb); 1459 vb->prepared = 1; 1460 vb->state = orig_state; 1461 1462 return 0; 1463 } 1464 1465 static int vb2_req_prepare(struct media_request_object *obj) 1466 { 1467 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1468 int ret; 1469 1470 if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST)) 1471 return -EINVAL; 1472 1473 mutex_lock(vb->vb2_queue->lock); 1474 ret = __buf_prepare(vb); 1475 mutex_unlock(vb->vb2_queue->lock); 1476 return ret; 1477 } 1478 1479 static void __vb2_dqbuf(struct vb2_buffer *vb); 1480 1481 static void vb2_req_unprepare(struct media_request_object *obj) 1482 { 1483 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1484 1485 
int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
		  struct media_request *req);

static void vb2_req_queue(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
	int err;

	mutex_lock(vb->vb2_queue->lock);
	/*
	 * There is no method to propagate an error from vb2_core_qbuf(),
	 * so if this returns a non-0 value, then WARN.
	 *
	 * The only exception is -EIO which is returned if q->error is
	 * set. We just ignore that, and expect this will be caught the
	 * next time vb2_req_prepare() is called.
	 */
	err = vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
	WARN_ON_ONCE(err && err != -EIO);
	mutex_unlock(vb->vb2_queue->lock);
}

static void vb2_req_unbind(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	if (vb->state == VB2_BUF_STATE_IN_REQUEST)
		call_void_bufop(vb->vb2_queue, init_buffer, vb);
}

static void vb2_req_release(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
		vb->state = VB2_BUF_STATE_DEQUEUED;
		if (vb->request)
			media_request_put(vb->request);
		vb->request = NULL;
	}
}

static const struct media_request_object_ops vb2_core_req_ops = {
	.prepare = vb2_req_prepare,
	.unprepare = vb2_req_unprepare,
	.queue = vb2_req_queue,
	.unbind = vb2_req_unbind,
	.release = vb2_req_release,
};

bool vb2_request_object_is_buffer(struct media_request_object *obj)
{
	return obj->ops == &vb2_core_req_ops;
}
EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer);

unsigned int vb2_request_buffer_cnt(struct media_request *req)
{
	struct media_request_object *obj;
	unsigned long flags;
	unsigned int buffer_cnt = 0;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list)
		if (vb2_request_object_is_buffer(obj))
			buffer_cnt++;
	spin_unlock_irqrestore(&req->lock, flags);

	return buffer_cnt;
}
EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt);

int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
{
	struct vb2_buffer *vb;
	int ret;

	vb = q->bufs[index];
	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}
	if (vb->prepared) {
		dprintk(q, 1, "buffer already prepared\n");
		return -EINVAL;
	}

	ret = __buf_prepare(vb);
	if (ret)
		return ret;

	/* Fill buffer information for the userspace */
	call_void_bufop(q, fill_user_buffer, vb, pb);

	dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
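/*
 * Lifecycle sketch for a buffer bound to a media request, mapping a
 * hypothetical userspace sequence onto vb2_core_req_ops above:
 *
 *	QBUF with a request fd	-> vb2_core_qbuf() binds req_obj and the
 *				   buffer enters the IN_REQUEST state
 *	request is queued	-> vb2_req_prepare(), then vb2_req_queue()
 *	driver completes buffer	-> vb2_buffer_done() unbinds req_obj
 *	DQBUF			-> the request reference is dropped
 */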
/*
 * vb2_start_streaming() - Attempt to start streaming.
 * @q:		videobuf2 queue
 *
 * Attempt to start streaming. When this function is called there must be
 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
 * number of buffers required for the DMA engine to function). If the
 * @start_streaming op fails it is supposed to return all the driver-owned
 * buffers back to vb2 in state QUEUED. Check if that happened and if
 * not warn and reclaim them forcefully.
 */
static int vb2_start_streaming(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	int ret;

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell the driver to start streaming */
	q->start_streaming_called = 1;
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	if (!ret)
		return 0;

	q->start_streaming_called = 0;

	dprintk(q, 1, "driver refused to start streaming\n");
	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * after a failed start_streaming(). See the start_streaming()
	 * documentation in videobuf2-core.h for more information on how
	 * buffers should be returned to vb2 in start_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned i;

		/*
		 * Forcefully reclaim buffers if the driver did not
		 * correctly return them to vb2.
		 */
		for (i = 0; i < q->num_buffers; ++i) {
			vb = q->bufs[i];
			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}
	/*
	 * If done_list is not empty, then start_streaming() didn't call
	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
	 * STATE_DONE.
	 */
	WARN_ON(!list_empty(&q->done_list));
	return ret;
}
int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
		  struct media_request *req)
{
	struct vb2_buffer *vb;
	enum vb2_buffer_state orig_state;
	int ret;

	if (q->error) {
		dprintk(q, 1, "fatal error occurred on queue\n");
		return -EIO;
	}

	vb = q->bufs[index];

	if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
	    q->requires_requests) {
		dprintk(q, 1, "qbuf requires a request\n");
		return -EBADR;
	}

	if ((req && q->uses_qbuf) ||
	    (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
	     q->uses_requests)) {
		dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n");
		return -EBUSY;
	}

	if (req) {
		int ret;

		q->uses_requests = 1;
		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			dprintk(q, 1, "buffer %d not in dequeued state\n",
				vb->index);
			return -EINVAL;
		}

		if (q->is_output && !vb->prepared) {
			ret = call_vb_qop(vb, buf_out_validate, vb);
			if (ret) {
				dprintk(q, 1, "buffer validation failed\n");
				return ret;
			}
		}

		media_request_object_init(&vb->req_obj);

		/* Make sure the request is in a safe state for updating. */
		ret = media_request_lock_for_update(req);
		if (ret)
			return ret;
		ret = media_request_object_bind(req, &vb2_core_req_ops,
						q, true, &vb->req_obj);
		media_request_unlock_for_update(req);
		if (ret)
			return ret;

		vb->state = VB2_BUF_STATE_IN_REQUEST;

		/*
		 * Increment the refcount and store the request.
		 * The request refcount is decremented again when the
		 * buffer is dequeued. This is to prevent vb2_buffer_done()
		 * from freeing the request from interrupt context, which can
		 * happen if the application closed the request fd after
		 * queueing the request.
		 */
		media_request_get(req);
		vb->request = req;

		/* Fill buffer information for the userspace */
		if (pb) {
			call_void_bufop(q, copy_timestamp, vb, pb);
			call_void_bufop(q, fill_user_buffer, vb, pb);
		}

		dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
		return 0;
	}

	if (vb->state != VB2_BUF_STATE_IN_REQUEST)
		q->uses_qbuf = 1;

	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
	case VB2_BUF_STATE_IN_REQUEST:
		if (!vb->prepared) {
			ret = __buf_prepare(vb);
			if (ret)
				return ret;
		}
		break;
	case VB2_BUF_STATE_PREPARING:
		dprintk(q, 1, "buffer still being prepared\n");
		return -EINVAL;
	default:
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}

	/*
	 * Add to the queued buffers list, a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	orig_state = vb->state;
	list_add_tail(&vb->queued_entry, &q->queued_list);
	q->queued_count++;
	q->waiting_for_buffers = false;
	vb->state = VB2_BUF_STATE_QUEUED;

	if (pb)
		call_void_bufop(q, copy_timestamp, vb, pb);

	trace_vb2_qbuf(q, vb);

	/*
	 * If already streaming, give the buffer to driver for processing.
	 * If not, the buffer will be given to driver on next streamon.
	 */
	if (q->start_streaming_called)
		__enqueue_in_driver(vb);

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/*
	 * If streamon has been called, and we haven't yet called
	 * start_streaming() since not enough buffers were queued, and
	 * we now have reached the minimum number of queued buffers,
	 * then we can finally call start_streaming().
	 */
	if (q->streaming && !q->start_streaming_called &&
	    q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret) {
			/*
			 * Since vb2_core_qbuf will return with an error,
			 * we should return it to state DEQUEUED since
			 * the error indicates that the buffer wasn't queued.
			 */
			list_del(&vb->queued_entry);
			q->queued_count--;
			vb->state = orig_state;
			return ret;
		}
	}

	dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_qbuf);
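/*
 * Example of the deferred start implemented above: on a queue with,
 * say, q->min_buffers_needed == 3, STREAMON merely sets q->streaming;
 * the first two qbuf calls only add buffers to queued_list, and the
 * third reaches the threshold and triggers vb2_start_streaming(),
 * which hands all three buffers to the driver before invoking the
 * start_streaming op.
 */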
1825 */ 1826 1827 for (;;) { 1828 int ret; 1829 1830 if (q->waiting_in_dqbuf) { 1831 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); 1832 return -EBUSY; 1833 } 1834 1835 if (!q->streaming) { 1836 dprintk(q, 1, "streaming off, will not wait for buffers\n"); 1837 return -EINVAL; 1838 } 1839 1840 if (q->error) { 1841 dprintk(q, 1, "Queue in error state, will not wait for buffers\n"); 1842 return -EIO; 1843 } 1844 1845 if (q->last_buffer_dequeued) { 1846 dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n"); 1847 return -EPIPE; 1848 } 1849 1850 if (!list_empty(&q->done_list)) { 1851 /* 1852 * Found a buffer that we were waiting for. 1853 */ 1854 break; 1855 } 1856 1857 if (nonblocking) { 1858 dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n"); 1859 return -EAGAIN; 1860 } 1861 1862 q->waiting_in_dqbuf = 1; 1863 /* 1864 * We are streaming and blocking, wait for another buffer to 1865 * become ready or for streamoff. Driver's lock is released to 1866 * allow streamoff or qbuf to be called while waiting. 1867 */ 1868 call_void_qop(q, wait_prepare, q); 1869 1870 /* 1871 * All locks have been released, it is safe to sleep now. 1872 */ 1873 dprintk(q, 3, "will sleep waiting for buffers\n"); 1874 ret = wait_event_interruptible(q->done_wq, 1875 !list_empty(&q->done_list) || !q->streaming || 1876 q->error); 1877 1878 /* 1879 * We need to reevaluate both conditions again after reacquiring 1880 * the locks or return an error if one occurred. 1881 */ 1882 call_void_qop(q, wait_finish, q); 1883 q->waiting_in_dqbuf = 0; 1884 if (ret) { 1885 dprintk(q, 1, "sleep was interrupted\n"); 1886 return ret; 1887 } 1888 } 1889 return 0; 1890 } 1891 1892 /* 1893 * __vb2_get_done_vb() - get a buffer ready for dequeuing 1894 * 1895 * Will sleep if required for nonblocking == false. 1896 */ 1897 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, 1898 void *pb, int nonblocking) 1899 { 1900 unsigned long flags; 1901 int ret = 0; 1902 1903 /* 1904 * Wait for at least one buffer to become available on the done_list. 1905 */ 1906 ret = __vb2_wait_for_done_vb(q, nonblocking); 1907 if (ret) 1908 return ret; 1909 1910 /* 1911 * Driver's lock has been held since we last verified that done_list 1912 * is not empty, so no need for another list_empty(done_list) check. 1913 */ 1914 spin_lock_irqsave(&q->done_lock, flags); 1915 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); 1916 /* 1917 * Only remove the buffer from done_list if all planes can be 1918 * handled. Some cases such as V4L2 file I/O and DVB have pb 1919 * == NULL; skip the check then as there's nothing to verify. 
1920 */ 1921 if (pb) 1922 ret = call_bufop(q, verify_planes_array, *vb, pb); 1923 if (!ret) 1924 list_del(&(*vb)->done_entry); 1925 spin_unlock_irqrestore(&q->done_lock, flags); 1926 1927 return ret; 1928 } 1929 1930 int vb2_wait_for_all_buffers(struct vb2_queue *q) 1931 { 1932 if (!q->streaming) { 1933 dprintk(q, 1, "streaming off, will not wait for buffers\n"); 1934 return -EINVAL; 1935 } 1936 1937 if (q->start_streaming_called) 1938 wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count)); 1939 return 0; 1940 } 1941 EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); 1942 1943 /* 1944 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state 1945 */ 1946 static void __vb2_dqbuf(struct vb2_buffer *vb) 1947 { 1948 struct vb2_queue *q = vb->vb2_queue; 1949 1950 /* nothing to do if the buffer is already dequeued */ 1951 if (vb->state == VB2_BUF_STATE_DEQUEUED) 1952 return; 1953 1954 vb->state = VB2_BUF_STATE_DEQUEUED; 1955 1956 call_void_bufop(q, init_buffer, vb); 1957 } 1958 1959 int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, 1960 bool nonblocking) 1961 { 1962 struct vb2_buffer *vb = NULL; 1963 int ret; 1964 1965 ret = __vb2_get_done_vb(q, &vb, pb, nonblocking); 1966 if (ret < 0) 1967 return ret; 1968 1969 switch (vb->state) { 1970 case VB2_BUF_STATE_DONE: 1971 dprintk(q, 3, "returning done buffer\n"); 1972 break; 1973 case VB2_BUF_STATE_ERROR: 1974 dprintk(q, 3, "returning done buffer with errors\n"); 1975 break; 1976 default: 1977 dprintk(q, 1, "invalid buffer state %s\n", 1978 vb2_state_name(vb->state)); 1979 return -EINVAL; 1980 } 1981 1982 call_void_vb_qop(vb, buf_finish, vb); 1983 vb->prepared = 0; 1984 1985 if (pindex) 1986 *pindex = vb->index; 1987 1988 /* Fill buffer information for the userspace */ 1989 if (pb) 1990 call_void_bufop(q, fill_user_buffer, vb, pb); 1991 1992 /* Remove from vb2 queue */ 1993 list_del(&vb->queued_entry); 1994 q->queued_count--; 1995 1996 trace_vb2_dqbuf(q, vb); 1997 1998 /* go back to dequeued state */ 1999 __vb2_dqbuf(vb); 2000 2001 if (WARN_ON(vb->req_obj.req)) { 2002 media_request_object_unbind(&vb->req_obj); 2003 media_request_object_put(&vb->req_obj); 2004 } 2005 if (vb->request) 2006 media_request_put(vb->request); 2007 vb->request = NULL; 2008 2009 dprintk(q, 2, "dqbuf of buffer %d, state: %s\n", 2010 vb->index, vb2_state_name(vb->state)); 2011 2012 return 0; 2013 2014 } 2015 EXPORT_SYMBOL_GPL(vb2_core_dqbuf); 2016 2017 /* 2018 * __vb2_queue_cancel() - cancel and stop (pause) streaming 2019 * 2020 * Removes all queued buffers from driver's queue and all buffers queued by 2021 * userspace from vb2's queue. Returns to state after reqbufs. 2022 */ 2023 static void __vb2_queue_cancel(struct vb2_queue *q) 2024 { 2025 unsigned int i; 2026 2027 /* 2028 * Tell driver to stop all transactions and release all queued 2029 * buffers. 2030 */ 2031 if (q->start_streaming_called) 2032 call_void_qop(q, stop_streaming, q); 2033 2034 if (q->streaming) 2035 call_void_qop(q, unprepare_streaming, q); 2036 2037 /* 2038 * If you see this warning, then the driver isn't cleaning up properly 2039 * in stop_streaming(). See the stop_streaming() documentation in 2040 * videobuf2-core.h for more information how buffers should be returned 2041 * to vb2 in stop_streaming(). 
/*
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 *
 * Removes all queued buffers from driver's queue and all buffers queued by
 * userspace from vb2's queue. Returns to the state right after reqbufs.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->start_streaming_called)
		call_void_qop(q, stop_streaming, q);

	if (q->streaming)
		call_void_qop(q, unprepare_streaming, q);

	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * in stop_streaming(). See the stop_streaming() documentation in
	 * videobuf2-core.h for more information on how buffers should be
	 * returned to vb2 in stop_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		for (i = 0; i < q->num_buffers; ++i)
			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
				pr_warn("driver bug: stop_streaming operation is leaving buf %p in active state\n",
					q->bufs[i]);
				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
			}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}

	q->streaming = 0;
	q->start_streaming_called = 0;
	q->queued_count = 0;
	q->error = 0;
	q->uses_requests = 0;
	q->uses_qbuf = 0;

	/*
	 * Remove all buffers from vb2's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->owned_by_drv_count, 0);
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 * Make sure to call buf_finish for any queued buffers. Normally
	 * that's done in dqbuf, but that's not going to happen when we
	 * cancel the whole queue. Note: this code belongs here, not in
	 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
	 * call to __fill_user_buffer() after buf_finish(). That order can't
	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
	 */
	for (i = 0; i < q->num_buffers; ++i) {
		struct vb2_buffer *vb = q->bufs[i];
		struct media_request *req = vb->req_obj.req;

		/*
		 * If a request is associated with this buffer, then
		 * call buf_request_complete() to give the driver a chance
		 * to complete any related request objects. Otherwise those
		 * objects would never complete.
		 */
		if (req) {
			enum media_request_state state;
			unsigned long flags;

			spin_lock_irqsave(&req->lock, flags);
			state = req->state;
			spin_unlock_irqrestore(&req->lock, flags);

			if (state == MEDIA_REQUEST_STATE_QUEUED)
				call_void_vb_qop(vb, buf_request_complete, vb);
		}

		__vb2_buf_mem_finish(vb);

		if (vb->prepared) {
			call_void_vb_qop(vb, buf_finish, vb);
			vb->prepared = 0;
		}
		__vb2_dqbuf(vb);

		if (vb->req_obj.req) {
			media_request_object_unbind(&vb->req_obj);
			media_request_object_put(&vb->req_obj);
		}
		if (vb->request)
			media_request_put(vb->request);
		vb->request = NULL;
		vb->copied_timestamp = 0;
	}
}
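
/*
 * The WARN_ON() in __vb2_queue_cancel() above fires when a driver's
 * stop_streaming() fails to give back the buffers it still owns. A minimal
 * sketch of a correct stop_streaming(), assuming a hypothetical per-device
 * list of in-flight buffers (my_dev, my_buf and buf_list are illustrative;
 * my_buf is assumed to embed a struct vb2_buffer as its 'vb' member):
 *
 *	static void my_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct my_dev *dev = vb2_get_drv_priv(q);
 *		struct my_buf *buf, *tmp;
 *
 *		list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
 *			list_del(&buf->list);
 *			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
 *		}
 *	}
 *
 * Every buffer still owned by the driver must be completed, normally with
 * VB2_BUF_STATE_ERROR, before stop_streaming() returns.
 */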
int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
{
	int ret;

	if (type != q->type) {
		dprintk(q, 1, "invalid stream type\n");
		return -EINVAL;
	}

	if (q->streaming) {
		dprintk(q, 3, "already streaming\n");
		return 0;
	}

	if (!q->num_buffers) {
		dprintk(q, 1, "no buffers have been allocated\n");
		return -EINVAL;
	}

	if (q->num_buffers < q->min_buffers_needed) {
		dprintk(q, 1, "need at least %u allocated buffers\n",
			q->min_buffers_needed);
		return -EINVAL;
	}

	ret = call_qop(q, prepare_streaming, q);
	if (ret)
		return ret;

	q->streaming = 1;

	/*
	 * Tell driver to start streaming provided sufficient buffers
	 * are available.
	 */
	if (q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret)
			goto unprepare;
	}

	dprintk(q, 3, "successful\n");
	return 0;

unprepare:
	call_void_qop(q, unprepare_streaming, q);
	q->streaming = 0;
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_streamon);

void vb2_queue_error(struct vb2_queue *q)
{
	q->error = 1;

	wake_up_all(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_queue_error);

int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
{
	if (type != q->type) {
		dprintk(q, 1, "invalid stream type\n");
		return -EINVAL;
	}

	/*
	 * Cancel will pause streaming and remove all buffers from the driver
	 * and vb2, effectively returning control over them to userspace.
	 *
	 * Note that we do this even if q->streaming == 0: if you prepare or
	 * queue buffers, and then call streamoff without ever having called
	 * streamon, you would still expect those buffers to be returned to
	 * their normal dequeued state.
	 */
	__vb2_queue_cancel(q);
	q->waiting_for_buffers = !q->is_output;
	q->last_buffer_dequeued = false;

	dprintk(q, 3, "successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_streamoff);
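
/*
 * The min_buffers_needed check in vb2_core_streamon() above guarantees that
 * start_streaming() only runs once that many buffers have been queued to
 * the driver. A minimal sketch of the matching driver callback, where
 * my_dev_start_dma() and my_return_all_buffers() are hypothetical helpers:
 *
 *	static int my_start_streaming(struct vb2_queue *q, unsigned int count)
 *	{
 *		struct my_dev *dev = vb2_get_drv_priv(q);
 *		int ret = my_dev_start_dma(dev);
 *
 *		if (ret)
 *			my_return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
 *		return ret;
 *	}
 *
 * On failure the driver must give all queued buffers back to vb2 with
 * vb2_buffer_done() in the VB2_BUF_STATE_QUEUED state.
 */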
/*
 * __find_plane_by_offset() - find plane associated with the given offset off
 */
static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
				  unsigned int *_buffer, unsigned int *_plane)
{
	struct vb2_buffer *vb;
	unsigned int buffer, plane;

	/*
	 * Sanity checks to ensure the lock is held, MEMORY_MMAP is
	 * used and fileio isn't active.
	 */
	lockdep_assert_held(&q->mmap_lock);

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	/*
	 * Go over all buffers and their planes, comparing the given offset
	 * with an offset assigned to each plane. If a match is found,
	 * return its buffer and plane numbers.
	 */
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		vb = q->bufs[buffer];

		for (plane = 0; plane < vb->num_planes; ++plane) {
			if (vb->planes[plane].m.offset == off) {
				*_buffer = buffer;
				*_plane = plane;
				return 0;
			}
		}
	}

	return -EINVAL;
}

int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
		    unsigned int index, unsigned int plane, unsigned int flags)
{
	struct vb2_buffer *vb = NULL;
	struct vb2_plane *vb_plane;
	int ret;
	struct dma_buf *dbuf;

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (!q->mem_ops->get_dmabuf) {
		dprintk(q, 1, "queue does not support DMA buffer exporting\n");
		return -EINVAL;
	}

	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
		dprintk(q, 1, "queue supports only O_CLOEXEC and access mode flags\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	if (index >= q->num_buffers) {
		dprintk(q, 1, "buffer index out of range\n");
		return -EINVAL;
	}

	vb = q->bufs[index];

	if (plane >= vb->num_planes) {
		dprintk(q, 1, "buffer plane out of range\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "expbuf: file io in progress\n");
		return -EBUSY;
	}

	vb_plane = &vb->planes[plane];

	dbuf = call_ptr_memop(get_dmabuf,
			      vb,
			      vb_plane->mem_priv,
			      flags & O_ACCMODE);
	if (IS_ERR_OR_NULL(dbuf)) {
		dprintk(q, 1, "failed to export buffer %d, plane %d\n",
			index, plane);
		return -EINVAL;
	}

	ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
	if (ret < 0) {
		dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n",
			index, plane, ret);
		dma_buf_put(dbuf);
		return ret;
	}

	dprintk(q, 3, "buffer %d, plane %d exported as file descriptor %d\n",
		index, plane, ret);
	*fd = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_expbuf);
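
/*
 * Userspace reaches vb2_core_expbuf() through the V4L2 VIDIOC_EXPBUF ioctl.
 * A minimal userspace sketch (error handling omitted; video_fd is assumed
 * to be an open V4L2 node with MMAP buffers already requested):
 *
 *	struct v4l2_exportbuffer expbuf = {
 *		.type	= V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.index	= 0,
 *		.plane	= 0,
 *		.flags	= O_CLOEXEC | O_RDWR,
 *	};
 *
 *	ioctl(video_fd, VIDIOC_EXPBUF, &expbuf);
 *
 * On success expbuf.fd holds a DMA-buf file descriptor for buffer 0,
 * plane 0, which can be passed to other DMA-buf aware subsystems.
 */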
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer = 0, plane = 0;
	int ret;
	unsigned long length;

	/*
	 * Check memory area access mode.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n");
		return -EINVAL;
	}
	if (q->is_output) {
		if (!(vma->vm_flags & VM_WRITE)) {
			dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n");
			return -EINVAL;
		}
	} else {
		if (!(vma->vm_flags & VM_READ)) {
			dprintk(q, 1, "invalid vma flags, VM_READ needed\n");
			return -EINVAL;
		}
	}

	mutex_lock(&q->mmap_lock);

	/*
	 * Find the plane corresponding to the offset passed by userspace. This
	 * will return an error if not MEMORY_MMAP or file I/O is in progress.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		goto unlock;

	vb = q->bufs[buffer];

	/*
	 * MMAP requires page-aligned buffers.
	 * The buffer length was page-aligned in __vb2_buf_mem_alloc(),
	 * so we must do the same here.
	 */
	length = PAGE_ALIGN(vb->planes[plane].length);
	if (length < (vma->vm_end - vma->vm_start)) {
		dprintk(q, 1,
			"MMAP invalid, as it would overflow buffer length\n");
		ret = -EINVAL;
		goto unlock;
	}

	/*
	 * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer,
	 * not as an in-buffer offset. We always want to mmap a whole buffer
	 * from its beginning.
	 */
	vma->vm_pgoff = 0;

	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);

unlock:
	mutex_unlock(&q->mmap_lock);
	if (ret)
		return ret;

	dprintk(q, 3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);

#ifndef CONFIG_MMU
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
				    unsigned long addr,
				    unsigned long len,
				    unsigned long pgoff,
				    unsigned long flags)
{
	unsigned long off = pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	void *vaddr;
	int ret;

	mutex_lock(&q->mmap_lock);

	/*
	 * Find the plane corresponding to the offset passed by userspace. This
	 * will return an error if not MEMORY_MMAP or file I/O is in progress.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		goto unlock;

	vb = q->bufs[buffer];

	vaddr = vb2_plane_vaddr(vb, plane);
	mutex_unlock(&q->mmap_lock);
	return vaddr ? (unsigned long)vaddr : -EINVAL;

unlock:
	mutex_unlock(&q->mmap_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif

int vb2_core_queue_init(struct vb2_queue *q)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q) ||
	    WARN_ON(!q->ops) ||
	    WARN_ON(!q->mem_ops) ||
	    WARN_ON(!q->type) ||
	    WARN_ON(!q->io_modes) ||
	    WARN_ON(!q->ops->queue_setup) ||
	    WARN_ON(!q->ops->buf_queue))
		return -EINVAL;

	if (WARN_ON(q->requires_requests && !q->supports_requests))
		return -EINVAL;

	/*
	 * This combination is not allowed since a non-zero value of
	 * q->min_buffers_needed can cause vb2_core_qbuf() to fail if
	 * it has to call start_streaming(), and the Request API expects
	 * that queueing a request (and thus queueing a buffer contained
	 * in that request) will always succeed. There is no method of
	 * propagating an error back to userspace.
	 */
	if (WARN_ON(q->supports_requests && q->min_buffers_needed))
		return -EINVAL;

	INIT_LIST_HEAD(&q->queued_list);
	INIT_LIST_HEAD(&q->done_list);
	spin_lock_init(&q->done_lock);
	mutex_init(&q->mmap_lock);
	init_waitqueue_head(&q->done_wq);

	q->memory = VB2_MEMORY_UNKNOWN;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_buffer);

	if (q->bidirectional)
		q->dma_dir = DMA_BIDIRECTIONAL;
	else
		q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (q->name[0] == '\0')
		snprintf(q->name, sizeof(q->name), "%s-%p",
			 q->is_output ? "out" : "cap", q);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_queue_init);

static int __vb2_init_fileio(struct vb2_queue *q, int read);
static int __vb2_cleanup_fileio(struct vb2_queue *q);

void vb2_core_queue_release(struct vb2_queue *q)
{
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);
	mutex_lock(&q->mmap_lock);
	__vb2_queue_free(q, q->num_buffers);
	mutex_unlock(&q->mmap_lock);
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);

__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
		       poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned long flags;

	/*
	 * poll_wait() MUST be called on the first invocation on all the
	 * potential queues of interest, even if we are not interested in their
	 * events during this first call. Failure to do so will result in the
	 * queue's events being ignored, because the poll_table won't be
	 * capable of adding new wait queues thereafter.
	 */
	poll_wait(file, &q->done_wq, wait);

	if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
		return 0;
	if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
		return 0;

	/*
	 * Start file I/O emulator only if streaming API has not been used yet.
	 */
	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
		if (!q->is_output && (q->io_modes & VB2_READ) &&
		    (req_events & (EPOLLIN | EPOLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return EPOLLERR;
		}
		if (q->is_output && (q->io_modes & VB2_WRITE) &&
		    (req_events & (EPOLLOUT | EPOLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return EPOLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return EPOLLOUT | EPOLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if the queue isn't streaming, or if the
	 * error flag is set.
	 */
	if (!vb2_is_streaming(q) || q->error)
		return EPOLLERR;

	/*
	 * If this quirk is set and QBUF hasn't been called yet then
	 * return EPOLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 * This quirk is set by V4L2 for backwards compatibility reasons.
	 */
	if (q->quirk_poll_must_check_waiting_for_buffers &&
	    q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
		return EPOLLERR;

	/*
	 * For output streams you can call write() as long as there are fewer
	 * buffers queued than there are buffers available.
	 */
	if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
		return EPOLLOUT | EPOLLWRNORM;

	if (list_empty(&q->done_list)) {
		/*
		 * If the last buffer was dequeued from a capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (q->last_buffer_dequeued)
			return EPOLLIN | EPOLLRDNORM;
	}

	/*
	 * Take first buffer available for dequeuing.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
				      done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE ||
		   vb->state == VB2_BUF_STATE_ERROR)) {
		return (q->is_output) ?
			EPOLLOUT | EPOLLWRNORM :
			EPOLLIN | EPOLLRDNORM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_poll);
/*
 * struct vb2_fileio_buf - buffer context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of the streaming API. This structure is used for
 * tracking context related to the buffers.
 */
struct vb2_fileio_buf {
	void *vaddr;
	unsigned int size;
	unsigned int pos;
	unsigned int queued:1;
};

/*
 * struct vb2_fileio_data - queue context used by file io emulator
 *
 * @cur_index:	the index of the buffer currently being read from or
 *		written to. If equal to q->num_buffers then a new buffer
 *		must be dequeued.
 * @initial_index: in the read() case all buffers are queued up immediately
 *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
 *		buffers. However, in the write() case no buffers are initially
 *		queued, instead whenever a buffer is full it is queued up by
 *		__vb2_perform_fileio(). Only once all available buffers have
 *		been queued up will __vb2_perform_fileio() start to dequeue
 *		buffers. This means that initially __vb2_perform_fileio()
 *		needs to know what buffer index to use when it is queuing up
 *		the buffers for the first time. That initial index is stored
 *		in this field. Once it is equal to q->num_buffers all
 *		available buffers have been queued and __vb2_perform_fileio()
 *		should start the normal dequeue/queue cycle. For example,
 *		with three buffers in the write() case both indices advance
 *		0 -> 1 -> 2 -> 3 as buffers fill up; once they reach 3 every
 *		subsequent pass dequeues a buffer first and reuses its index.
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of the streaming API. For proper operation it
 * requires this structure to preserve state between each call of the
 * read or write functions.
 */
struct vb2_fileio_data {
	unsigned int count;
	unsigned int type;
	unsigned int memory;
	struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
	unsigned int cur_index;
	unsigned int initial_index;
	unsigned int q_count;
	unsigned int dq_count;
	unsigned read_once:1;
	unsigned write_immediately:1;
};

/*
 * __vb2_init_fileio() - initialize file io emulator
 * @q:		videobuf2 queue
 * @read:	mode selector (1 means read, 0 means write)
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	/*
	 * Sanity check
	 */
	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
		    (!read && !(q->io_modes & VB2_WRITE))))
		return -EINVAL;

	/*
	 * Check if device supports mapping buffers to kernel virtual space.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * Check that the streaming API has not already been activated.
	 */
	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	/*
	 * Start with count 1, driver can increase it in queue_setup()
	 */
	count = 1;

	dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
		(read) ? "read" : "write", count, q->fileio_read_once,
		q->fileio_write_immediately);

	fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->read_once = q->fileio_read_once;
	fileio->write_immediately = q->fileio_write_immediately;

	/*
	 * Request buffers and use MMAP type to force driver
	 * to allocate buffers by itself.
	 */
	fileio->count = count;
	fileio->memory = VB2_MEMORY_MMAP;
	fileio->type = q->type;
	q->fileio = fileio;
	ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
	if (ret)
		goto err_kfree;

	/*
	 * Check if plane_count is correct
	 * (multiplane buffers are not supported).
	 */
	if (q->bufs[0]->num_planes != 1) {
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Get kernel address of each buffer.
	 */
	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	/*
	 * Read mode requires pre-queuing of all buffers.
	 */
	if (read) {
		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < q->num_buffers; i++) {
			ret = vb2_core_qbuf(q, i, NULL, NULL);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}
		/*
		 * All buffers have been queued, so mark that by setting
		 * initial_index to q->num_buffers
		 */
		fileio->initial_index = q->num_buffers;
		fileio->cur_index = q->num_buffers;
	}

	/*
	 * Start streaming.
	 */
	ret = vb2_core_streamon(q, q->type);
	if (ret)
		goto err_reqbufs;

	return ret;

err_reqbufs:
	fileio->count = 0;
	vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);

err_kfree:
	q->fileio = NULL;
	kfree(fileio);
	return ret;
}

/*
 * __vb2_cleanup_fileio() - free resources used by file io emulator
 * @q:		videobuf2 queue
 */
static int __vb2_cleanup_fileio(struct vb2_queue *q)
{
	struct vb2_fileio_data *fileio = q->fileio;

	if (fileio) {
		vb2_core_streamoff(q, q->type);
		q->fileio = NULL;
		fileio->count = 0;
		vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
		kfree(fileio);
		dprintk(q, 3, "file io emulator closed\n");
	}
	return 0;
}
/*
 * __vb2_perform_fileio() - perform a single file io (read or write) operation
 * @q:		videobuf2 queue
 * @data:	pointer to the target userspace buffer
 * @count:	number of bytes to read or write
 * @ppos:	file handle position tracking pointer
 * @nonblock:	mode selector (1 means nonblocking calls, 0 means blocking)
 * @read:	access mode selector (1 means read, 0 means write)
 */
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
				   loff_t *ppos, int nonblock, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_fileio_buf *buf;
	bool is_multiplanar = q->is_multiplanar;
	/*
	 * When using write() to write data to an output video node the vb2 core
	 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
	 * else is able to provide this information with the write() operation.
	 */
	bool copy_timestamp = !read && q->copy_timestamp;
	unsigned index;
	int ret;

	dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n",
		read ? "read" : "write", (long)*ppos, count,
		nonblock ? "non" : "");

	if (!data)
		return -EINVAL;

	if (q->waiting_in_dqbuf) {
		dprintk(q, 3, "another dup()ped fd is %s\n",
			read ? "reading" : "writing");
		return -EBUSY;
	}

	/*
	 * Initialize emulator on first call.
	 */
	if (!vb2_fileio_is_active(q)) {
		ret = __vb2_init_fileio(q, read);
		dprintk(q, 3, "vb2_init_fileio result: %d\n", ret);
		if (ret)
			return ret;
	}
	fileio = q->fileio;

	/*
	 * Check if we need to dequeue the buffer.
	 */
	index = fileio->cur_index;
	if (index >= q->num_buffers) {
		struct vb2_buffer *b;

		/*
		 * Call vb2_core_dqbuf() to get a buffer back.
		 */
		ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
		dprintk(q, 5, "vb2_dqbuf result: %d\n", ret);
		if (ret)
			return ret;
		fileio->dq_count += 1;

		fileio->cur_index = index;
		buf = &fileio->bufs[index];
		b = q->bufs[index];

		/*
		 * Get number of bytes filled by the driver
		 */
		buf->pos = 0;
		buf->queued = 0;
		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
				 : vb2_plane_size(q->bufs[index], 0);
		/* Compensate for data_offset on read in the multiplanar case. */
		if (is_multiplanar && read &&
		    b->planes[0].data_offset < buf->size) {
			buf->pos = b->planes[0].data_offset;
			buf->size -= buf->pos;
		}
	} else {
		buf = &fileio->bufs[index];
	}

	/*
	 * Limit count to the bytes remaining in the buffer.
	 */
	if (buf->pos + count > buf->size) {
		count = buf->size - buf->pos;
		dprintk(q, 5, "reducing read count: %zd\n", count);
	}

	/*
	 * Transfer data to or from userspace.
	 */
	dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n",
		count, index, buf->pos);
	if (read)
		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
	else
		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
	if (ret) {
		dprintk(q, 3, "error copying data\n");
		return -EFAULT;
	}

	/*
	 * Update counters.
	 */
	buf->pos += count;
	*ppos += count;

	/*
	 * Queue next buffer if required.
	 */
	if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
		struct vb2_buffer *b = q->bufs[index];

		/*
		 * Check if this is the last buffer to read.
		 */
		if (read && fileio->read_once && fileio->dq_count == 1) {
			dprintk(q, 3, "read limit reached\n");
			return __vb2_cleanup_fileio(q);
		}

		/*
		 * Call vb2_core_qbuf() to give the buffer to the driver.
		 */
		b->planes[0].bytesused = buf->pos;

		if (copy_timestamp)
			b->timestamp = ktime_get_ns();
		ret = vb2_core_qbuf(q, index, NULL, NULL);
		dprintk(q, 5, "vb2_qbuf result: %d\n", ret);
		if (ret)
			return ret;

		/*
		 * Buffer has been queued, update the status
		 */
		buf->pos = 0;
		buf->queued = 1;
		buf->size = vb2_plane_size(q->bufs[index], 0);
		fileio->q_count += 1;
		/*
		 * If we are queuing up buffers for the first time, then
		 * increase initial_index by one.
		 */
		if (fileio->initial_index < q->num_buffers)
			fileio->initial_index++;
		/*
		 * The next buffer to use is either a buffer that's going to be
		 * queued for the first time (initial_index < q->num_buffers)
		 * or it is equal to q->num_buffers, meaning that next time we
		 * will need to dequeue a buffer since we've now queued up all
		 * the 'first time' buffers.
		 */
		fileio->cur_index = fileio->initial_index;
	}

	/*
	 * Return the proper number of bytes processed.
	 */
	if (ret == 0)
		ret = count;
	return ret;
}
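
/*
 * Drivers expose this emulator through the stock videobuf2-v4l2 file
 * operations, and userspace then consumes the video node with plain file
 * I/O. A minimal sketch of both sides (my_fops, FRAME_SIZE and process()
 * are illustrative; error handling omitted):
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.read		= vb2_fop_read,
 *		.poll		= vb2_fop_poll,
 *		.mmap		= vb2_fop_mmap,
 *		.unlocked_ioctl	= video_ioctl2,
 *	};
 *
 * and in userspace, assuming the driver set VB2_READ in io_modes:
 *
 *	int fd = open("/dev/video0", O_RDONLY);
 *	char frame[FRAME_SIZE];
 *
 *	while (read(fd, frame, sizeof(frame)) > 0)
 *		process(frame);
 *
 * The first read() initializes the emulator via __vb2_init_fileio();
 * releasing the file tears it down through __vb2_cleanup_fileio().
 */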
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
}
EXPORT_SYMBOL_GPL(vb2_read);

size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
		 loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, (char __user *) data, count,
				    ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);

struct vb2_threadio_data {
	struct task_struct *thread;
	vb2_thread_fnc fnc;
	void *priv;
	bool stop;
};

static int vb2_thread(void *data)
{
	struct vb2_queue *q = data;
	struct vb2_threadio_data *threadio = q->threadio;
	bool copy_timestamp = false;
	unsigned prequeue = 0;
	unsigned index = 0;
	int ret = 0;

	if (q->is_output) {
		prequeue = q->num_buffers;
		copy_timestamp = q->copy_timestamp;
	}

	set_freezable();

	for (;;) {
		struct vb2_buffer *vb;

		/*
		 * Call vb2_core_dqbuf() to get a buffer back.
		 */
		if (prequeue) {
			vb = q->bufs[index++];
			prequeue--;
		} else {
			call_void_qop(q, wait_finish, q);
			if (!threadio->stop)
				ret = vb2_core_dqbuf(q, &index, NULL, 0);
			call_void_qop(q, wait_prepare, q);
			dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret);
			if (!ret)
				vb = q->bufs[index];
		}
		if (ret || threadio->stop)
			break;
		try_to_freeze();

		if (vb->state != VB2_BUF_STATE_ERROR)
			if (threadio->fnc(vb, threadio->priv))
				break;
		call_void_qop(q, wait_finish, q);
		if (copy_timestamp)
			vb->timestamp = ktime_get_ns();
		if (!threadio->stop)
			ret = vb2_core_qbuf(q, vb->index, NULL, NULL);
		call_void_qop(q, wait_prepare, q);
		if (ret || threadio->stop)
			break;
	}

	/* Hmm, linux becomes *very* unhappy without this ... */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}
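
/*
 * A minimal sketch of a vb2_thread_fnc callback as vb2_thread() invokes it,
 * with my_dvb and dvb_feed_frame() standing in for the real consumer:
 *
 *	static int my_thread_fnc(struct vb2_buffer *vb, void *priv)
 *	{
 *		struct my_dvb *dvb = priv;
 *
 *		dvb_feed_frame(dvb, vb2_plane_vaddr(vb, 0),
 *			       vb2_get_plane_payload(vb, 0));
 *		return 0;
 *	}
 *
 * Returning non-zero from the callback makes vb2_thread() break out of
 * its loop.
 */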
/*
 * This function should not be used for anything else but the videobuf2-dvb
 * support. If you think you have another good use-case for this, then please
 * contact the linux-media mailing list first.
 */
int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
		     const char *thread_name)
{
	struct vb2_threadio_data *threadio;
	int ret = 0;

	if (q->threadio)
		return -EBUSY;
	if (vb2_is_busy(q))
		return -EBUSY;
	if (WARN_ON(q->fileio))
		return -EBUSY;

	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
	if (threadio == NULL)
		return -ENOMEM;
	threadio->fnc = fnc;
	threadio->priv = priv;

	ret = __vb2_init_fileio(q, !q->is_output);
	dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret);
	if (ret)
		goto nomem;
	q->threadio = threadio;
	threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
	if (IS_ERR(threadio->thread)) {
		ret = PTR_ERR(threadio->thread);
		threadio->thread = NULL;
		goto nothread;
	}
	return 0;

nothread:
	__vb2_cleanup_fileio(q);
nomem:
	kfree(threadio);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_thread_start);

int vb2_thread_stop(struct vb2_queue *q)
{
	struct vb2_threadio_data *threadio = q->threadio;
	int err;

	if (threadio == NULL)
		return 0;
	threadio->stop = true;
	/* Wake up all pending sleeps in the thread */
	vb2_queue_error(q);
	err = kthread_stop(threadio->thread);
	__vb2_cleanup_fileio(q);
	threadio->thread = NULL;
	kfree(threadio);
	q->threadio = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(vb2_thread_stop);

MODULE_DESCRIPTION("Media buffer core framework");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);