1 /* 2 * videobuf2-core.c - video buffer 2 core framework 3 * 4 * Copyright (C) 2010 Samsung Electronics 5 * 6 * Author: Pawel Osciak <pawel@osciak.com> 7 * Marek Szyprowski <m.szyprowski@samsung.com> 8 * 9 * The vb2_thread implementation was based on code from videobuf-dvb.c: 10 * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs] 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation. 15 */ 16 17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 18 19 #include <linux/err.h> 20 #include <linux/kernel.h> 21 #include <linux/module.h> 22 #include <linux/mm.h> 23 #include <linux/poll.h> 24 #include <linux/slab.h> 25 #include <linux/sched.h> 26 #include <linux/freezer.h> 27 #include <linux/kthread.h> 28 29 #include <media/videobuf2-core.h> 30 #include <media/v4l2-mc.h> 31 32 #include <trace/events/vb2.h> 33 34 static int debug; 35 module_param(debug, int, 0644); 36 37 #define dprintk(q, level, fmt, arg...) \ 38 do { \ 39 if (debug >= level) \ 40 pr_info("[%s] %s: " fmt, (q)->name, __func__, \ 41 ## arg); \ 42 } while (0) 43 44 #ifdef CONFIG_VIDEO_ADV_DEBUG 45 46 /* 47 * If advanced debugging is on, then count how often each op is called 48 * successfully, which can either be per-buffer or per-queue. 49 * 50 * This makes it easy to check that the 'init' and 'cleanup' 51 * (and variations thereof) stay balanced. 52 */ 53 54 #define log_memop(vb, op) \ 55 dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n", \ 56 (vb)->index, #op, \ 57 (vb)->vb2_queue->mem_ops->op ? "" : " (nop)") 58 59 #define call_memop(vb, op, args...) \ 60 ({ \ 61 struct vb2_queue *_q = (vb)->vb2_queue; \ 62 int err; \ 63 \ 64 log_memop(vb, op); \ 65 err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \ 66 if (!err) \ 67 (vb)->cnt_mem_ ## op++; \ 68 err; \ 69 }) 70 71 #define call_ptr_memop(op, vb, args...) \ 72 ({ \ 73 struct vb2_queue *_q = (vb)->vb2_queue; \ 74 void *ptr; \ 75 \ 76 log_memop(vb, op); \ 77 ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL; \ 78 if (!IS_ERR_OR_NULL(ptr)) \ 79 (vb)->cnt_mem_ ## op++; \ 80 ptr; \ 81 }) 82 83 #define call_void_memop(vb, op, args...) \ 84 ({ \ 85 struct vb2_queue *_q = (vb)->vb2_queue; \ 86 \ 87 log_memop(vb, op); \ 88 if (_q->mem_ops->op) \ 89 _q->mem_ops->op(args); \ 90 (vb)->cnt_mem_ ## op++; \ 91 }) 92 93 #define log_qop(q, op) \ 94 dprintk(q, 2, "call_qop(%s)%s\n", #op, \ 95 (q)->ops->op ? "" : " (nop)") 96 97 #define call_qop(q, op, args...) \ 98 ({ \ 99 int err; \ 100 \ 101 log_qop(q, op); \ 102 err = (q)->ops->op ? (q)->ops->op(args) : 0; \ 103 if (!err) \ 104 (q)->cnt_ ## op++; \ 105 err; \ 106 }) 107 108 #define call_void_qop(q, op, args...) \ 109 ({ \ 110 log_qop(q, op); \ 111 if ((q)->ops->op) \ 112 (q)->ops->op(args); \ 113 (q)->cnt_ ## op++; \ 114 }) 115 116 #define log_vb_qop(vb, op, args...) \ 117 dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n", \ 118 (vb)->index, #op, \ 119 (vb)->vb2_queue->ops->op ? "" : " (nop)") 120 121 #define call_vb_qop(vb, op, args...) \ 122 ({ \ 123 int err; \ 124 \ 125 log_vb_qop(vb, op); \ 126 err = (vb)->vb2_queue->ops->op ? \ 127 (vb)->vb2_queue->ops->op(args) : 0; \ 128 if (!err) \ 129 (vb)->cnt_ ## op++; \ 130 err; \ 131 }) 132 133 #define call_void_vb_qop(vb, op, args...) \ 134 ({ \ 135 log_vb_qop(vb, op); \ 136 if ((vb)->vb2_queue->ops->op) \ 137 (vb)->vb2_queue->ops->op(args); \ 138 (vb)->cnt_ ## op++; \ 139 }) 140 141 #else 142 143 #define call_memop(vb, op, args...) \ 144 ((vb)->vb2_queue->mem_ops->op ? \ 145 (vb)->vb2_queue->mem_ops->op(args) : 0) 146 147 #define call_ptr_memop(op, vb, args...) \ 148 ((vb)->vb2_queue->mem_ops->op ? \ 149 (vb)->vb2_queue->mem_ops->op(vb, args) : NULL) 150 151 #define call_void_memop(vb, op, args...) \ 152 do { \ 153 if ((vb)->vb2_queue->mem_ops->op) \ 154 (vb)->vb2_queue->mem_ops->op(args); \ 155 } while (0) 156 157 #define call_qop(q, op, args...) \ 158 ((q)->ops->op ? (q)->ops->op(args) : 0) 159 160 #define call_void_qop(q, op, args...) \ 161 do { \ 162 if ((q)->ops->op) \ 163 (q)->ops->op(args); \ 164 } while (0) 165 166 #define call_vb_qop(vb, op, args...) \ 167 ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0) 168 169 #define call_void_vb_qop(vb, op, args...) \ 170 do { \ 171 if ((vb)->vb2_queue->ops->op) \ 172 (vb)->vb2_queue->ops->op(args); \ 173 } while (0) 174 175 #endif 176 177 #define call_bufop(q, op, args...) \ 178 ({ \ 179 int ret = 0; \ 180 if (q && q->buf_ops && q->buf_ops->op) \ 181 ret = q->buf_ops->op(args); \ 182 ret; \ 183 }) 184 185 #define call_void_bufop(q, op, args...) \ 186 ({ \ 187 if (q && q->buf_ops && q->buf_ops->op) \ 188 q->buf_ops->op(args); \ 189 }) 190 191 static void __vb2_queue_cancel(struct vb2_queue *q); 192 static void __enqueue_in_driver(struct vb2_buffer *vb); 193 194 static const char *vb2_state_name(enum vb2_buffer_state s) 195 { 196 static const char * const state_names[] = { 197 [VB2_BUF_STATE_DEQUEUED] = "dequeued", 198 [VB2_BUF_STATE_IN_REQUEST] = "in request", 199 [VB2_BUF_STATE_PREPARING] = "preparing", 200 [VB2_BUF_STATE_QUEUED] = "queued", 201 [VB2_BUF_STATE_ACTIVE] = "active", 202 [VB2_BUF_STATE_DONE] = "done", 203 [VB2_BUF_STATE_ERROR] = "error", 204 }; 205 206 if ((unsigned int)(s) < ARRAY_SIZE(state_names)) 207 return state_names[s]; 208 return "unknown"; 209 } 210 211 /* 212 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer 213 */ 214 static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) 215 { 216 struct vb2_queue *q = vb->vb2_queue; 217 void *mem_priv; 218 int plane; 219 int ret = -ENOMEM; 220 221 /* 222 * Allocate memory for all planes in this buffer 223 * NOTE: mmapped areas should be page aligned 224 */ 225 for (plane = 0; plane < vb->num_planes; ++plane) { 226 /* Memops alloc requires size to be page aligned. */ 227 unsigned long size = PAGE_ALIGN(vb->planes[plane].length); 228 229 /* Did it wrap around? */ 230 if (size < vb->planes[plane].length) 231 goto free; 232 233 mem_priv = call_ptr_memop(alloc, 234 vb, 235 q->alloc_devs[plane] ? : q->dev, 236 size); 237 if (IS_ERR_OR_NULL(mem_priv)) { 238 if (mem_priv) 239 ret = PTR_ERR(mem_priv); 240 goto free; 241 } 242 243 /* Associate allocator private data with this plane */ 244 vb->planes[plane].mem_priv = mem_priv; 245 } 246 247 return 0; 248 free: 249 /* Free already allocated memory if one of the allocations failed */ 250 for (; plane > 0; --plane) { 251 call_void_memop(vb, put, vb->planes[plane - 1].mem_priv); 252 vb->planes[plane - 1].mem_priv = NULL; 253 } 254 255 return ret; 256 } 257 258 /* 259 * __vb2_buf_mem_free() - free memory of the given buffer 260 */ 261 static void __vb2_buf_mem_free(struct vb2_buffer *vb) 262 { 263 unsigned int plane; 264 265 for (plane = 0; plane < vb->num_planes; ++plane) { 266 call_void_memop(vb, put, vb->planes[plane].mem_priv); 267 vb->planes[plane].mem_priv = NULL; 268 dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n", 269 plane, vb->index); 270 } 271 } 272 273 /* 274 * __vb2_buf_userptr_put() - release userspace memory associated with 275 * a USERPTR buffer 276 */ 277 static void __vb2_buf_userptr_put(struct vb2_buffer *vb) 278 { 279 unsigned int plane; 280 281 for (plane = 0; plane < vb->num_planes; ++plane) { 282 if (vb->planes[plane].mem_priv) 283 call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); 284 vb->planes[plane].mem_priv = NULL; 285 } 286 } 287 288 /* 289 * __vb2_plane_dmabuf_put() - release memory associated with 290 * a DMABUF shared plane 291 */ 292 static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p) 293 { 294 if (!p->mem_priv) 295 return; 296 297 if (p->dbuf_mapped) 298 call_void_memop(vb, unmap_dmabuf, p->mem_priv); 299 300 call_void_memop(vb, detach_dmabuf, p->mem_priv); 301 dma_buf_put(p->dbuf); 302 p->mem_priv = NULL; 303 p->dbuf = NULL; 304 p->dbuf_mapped = 0; 305 } 306 307 /* 308 * __vb2_buf_dmabuf_put() - release memory associated with 309 * a DMABUF shared buffer 310 */ 311 static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) 312 { 313 unsigned int plane; 314 315 for (plane = 0; plane < vb->num_planes; ++plane) 316 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); 317 } 318 319 /* 320 * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory 321 * to sync caches 322 */ 323 static void __vb2_buf_mem_prepare(struct vb2_buffer *vb) 324 { 325 unsigned int plane; 326 327 if (vb->synced) 328 return; 329 330 vb->synced = 1; 331 for (plane = 0; plane < vb->num_planes; ++plane) 332 call_void_memop(vb, prepare, vb->planes[plane].mem_priv); 333 } 334 335 /* 336 * __vb2_buf_mem_finish() - call ->finish on buffer's private memory 337 * to sync caches 338 */ 339 static void __vb2_buf_mem_finish(struct vb2_buffer *vb) 340 { 341 unsigned int plane; 342 343 if (!vb->synced) 344 return; 345 346 vb->synced = 0; 347 for (plane = 0; plane < vb->num_planes; ++plane) 348 call_void_memop(vb, finish, vb->planes[plane].mem_priv); 349 } 350 351 /* 352 * __setup_offsets() - setup unique offsets ("cookies") for every plane in 353 * the buffer. 354 */ 355 static void __setup_offsets(struct vb2_buffer *vb) 356 { 357 struct vb2_queue *q = vb->vb2_queue; 358 unsigned int plane; 359 unsigned long off = 0; 360 361 if (vb->index) { 362 struct vb2_buffer *prev = q->bufs[vb->index - 1]; 363 struct vb2_plane *p = &prev->planes[prev->num_planes - 1]; 364 365 off = PAGE_ALIGN(p->m.offset + p->length); 366 } 367 368 for (plane = 0; plane < vb->num_planes; ++plane) { 369 vb->planes[plane].m.offset = off; 370 371 dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n", 372 vb->index, plane, off); 373 374 off += vb->planes[plane].length; 375 off = PAGE_ALIGN(off); 376 } 377 } 378 379 static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb) 380 { 381 /* 382 * DMA exporter should take care of cache syncs, so we can avoid 383 * explicit ->prepare()/->finish() syncs. For other ->memory types 384 * we always need ->prepare() or/and ->finish() cache sync. 385 */ 386 if (q->memory == VB2_MEMORY_DMABUF) { 387 vb->skip_cache_sync_on_finish = 1; 388 vb->skip_cache_sync_on_prepare = 1; 389 return; 390 } 391 392 /* 393 * ->finish() cache sync can be avoided when queue direction is 394 * TO_DEVICE. 395 */ 396 if (q->dma_dir == DMA_TO_DEVICE) 397 vb->skip_cache_sync_on_finish = 1; 398 } 399 400 /* 401 * __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP type) 402 * video buffer memory for all buffers/planes on the queue and initializes the 403 * queue 404 * 405 * Returns the number of buffers successfully allocated. 406 */ 407 static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, 408 unsigned int num_buffers, unsigned int num_planes, 409 const unsigned plane_sizes[VB2_MAX_PLANES]) 410 { 411 unsigned int buffer, plane; 412 struct vb2_buffer *vb; 413 int ret; 414 415 /* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */ 416 num_buffers = min_t(unsigned int, num_buffers, 417 VB2_MAX_FRAME - q->num_buffers); 418 419 for (buffer = 0; buffer < num_buffers; ++buffer) { 420 /* Allocate vb2 buffer structures */ 421 vb = kzalloc(q->buf_struct_size, GFP_KERNEL); 422 if (!vb) { 423 dprintk(q, 1, "memory alloc for buffer struct failed\n"); 424 break; 425 } 426 427 vb->state = VB2_BUF_STATE_DEQUEUED; 428 vb->vb2_queue = q; 429 vb->num_planes = num_planes; 430 vb->index = q->num_buffers + buffer; 431 vb->type = q->type; 432 vb->memory = memory; 433 init_buffer_cache_hints(q, vb); 434 for (plane = 0; plane < num_planes; ++plane) { 435 vb->planes[plane].length = plane_sizes[plane]; 436 vb->planes[plane].min_length = plane_sizes[plane]; 437 } 438 call_void_bufop(q, init_buffer, vb); 439 440 q->bufs[vb->index] = vb; 441 442 /* Allocate video buffer memory for the MMAP type */ 443 if (memory == VB2_MEMORY_MMAP) { 444 ret = __vb2_buf_mem_alloc(vb); 445 if (ret) { 446 dprintk(q, 1, "failed allocating memory for buffer %d\n", 447 buffer); 448 q->bufs[vb->index] = NULL; 449 kfree(vb); 450 break; 451 } 452 __setup_offsets(vb); 453 /* 454 * Call the driver-provided buffer initialization 455 * callback, if given. An error in initialization 456 * results in queue setup failure. 457 */ 458 ret = call_vb_qop(vb, buf_init, vb); 459 if (ret) { 460 dprintk(q, 1, "buffer %d %p initialization failed\n", 461 buffer, vb); 462 __vb2_buf_mem_free(vb); 463 q->bufs[vb->index] = NULL; 464 kfree(vb); 465 break; 466 } 467 } 468 } 469 470 dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n", 471 buffer, num_planes); 472 473 return buffer; 474 } 475 476 /* 477 * __vb2_free_mem() - release all video buffer memory for a given queue 478 */ 479 static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers) 480 { 481 unsigned int buffer; 482 struct vb2_buffer *vb; 483 484 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; 485 ++buffer) { 486 vb = q->bufs[buffer]; 487 if (!vb) 488 continue; 489 490 /* Free MMAP buffers or release USERPTR buffers */ 491 if (q->memory == VB2_MEMORY_MMAP) 492 __vb2_buf_mem_free(vb); 493 else if (q->memory == VB2_MEMORY_DMABUF) 494 __vb2_buf_dmabuf_put(vb); 495 else 496 __vb2_buf_userptr_put(vb); 497 } 498 } 499 500 /* 501 * __vb2_queue_free() - free buffers at the end of the queue - video memory and 502 * related information, if no buffers are left return the queue to an 503 * uninitialized state. Might be called even if the queue has already been freed. 504 */ 505 static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) 506 { 507 unsigned int buffer; 508 509 /* 510 * Sanity check: when preparing a buffer the queue lock is released for 511 * a short while (see __buf_prepare for the details), which would allow 512 * a race with a reqbufs which can call this function. Removing the 513 * buffers from underneath __buf_prepare is obviously a bad idea, so we 514 * check if any of the buffers is in the state PREPARING, and if so we 515 * just return -EAGAIN. 516 */ 517 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; 518 ++buffer) { 519 if (q->bufs[buffer] == NULL) 520 continue; 521 if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) { 522 dprintk(q, 1, "preparing buffers, cannot free\n"); 523 return -EAGAIN; 524 } 525 } 526 527 /* Call driver-provided cleanup function for each buffer, if provided */ 528 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; 529 ++buffer) { 530 struct vb2_buffer *vb = q->bufs[buffer]; 531 532 if (vb && vb->planes[0].mem_priv) 533 call_void_vb_qop(vb, buf_cleanup, vb); 534 } 535 536 /* Release video buffer memory */ 537 __vb2_free_mem(q, buffers); 538 539 #ifdef CONFIG_VIDEO_ADV_DEBUG 540 /* 541 * Check that all the calls were balances during the life-time of this 542 * queue. If not (or if the debug level is 1 or up), then dump the 543 * counters to the kernel log. 544 */ 545 if (q->num_buffers) { 546 bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming || 547 q->cnt_prepare_streaming != q->cnt_unprepare_streaming || 548 q->cnt_wait_prepare != q->cnt_wait_finish; 549 550 if (unbalanced || debug) { 551 pr_info("counters for queue %p:%s\n", q, 552 unbalanced ? " UNBALANCED!" : ""); 553 pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n", 554 q->cnt_queue_setup, q->cnt_start_streaming, 555 q->cnt_stop_streaming); 556 pr_info(" prepare_streaming: %u unprepare_streaming: %u\n", 557 q->cnt_prepare_streaming, q->cnt_unprepare_streaming); 558 pr_info(" wait_prepare: %u wait_finish: %u\n", 559 q->cnt_wait_prepare, q->cnt_wait_finish); 560 } 561 q->cnt_queue_setup = 0; 562 q->cnt_wait_prepare = 0; 563 q->cnt_wait_finish = 0; 564 q->cnt_prepare_streaming = 0; 565 q->cnt_start_streaming = 0; 566 q->cnt_stop_streaming = 0; 567 q->cnt_unprepare_streaming = 0; 568 } 569 for (buffer = 0; buffer < q->num_buffers; ++buffer) { 570 struct vb2_buffer *vb = q->bufs[buffer]; 571 bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put || 572 vb->cnt_mem_prepare != vb->cnt_mem_finish || 573 vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr || 574 vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf || 575 vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf || 576 vb->cnt_buf_queue != vb->cnt_buf_done || 577 vb->cnt_buf_prepare != vb->cnt_buf_finish || 578 vb->cnt_buf_init != vb->cnt_buf_cleanup; 579 580 if (unbalanced || debug) { 581 pr_info(" counters for queue %p, buffer %d:%s\n", 582 q, buffer, unbalanced ? " UNBALANCED!" : ""); 583 pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n", 584 vb->cnt_buf_init, vb->cnt_buf_cleanup, 585 vb->cnt_buf_prepare, vb->cnt_buf_finish); 586 pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n", 587 vb->cnt_buf_out_validate, vb->cnt_buf_queue, 588 vb->cnt_buf_done, vb->cnt_buf_request_complete); 589 pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n", 590 vb->cnt_mem_alloc, vb->cnt_mem_put, 591 vb->cnt_mem_prepare, vb->cnt_mem_finish, 592 vb->cnt_mem_mmap); 593 pr_info(" get_userptr: %u put_userptr: %u\n", 594 vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr); 595 pr_info(" attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n", 596 vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf, 597 vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf); 598 pr_info(" get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n", 599 vb->cnt_mem_get_dmabuf, 600 vb->cnt_mem_num_users, 601 vb->cnt_mem_vaddr, 602 vb->cnt_mem_cookie); 603 } 604 } 605 #endif 606 607 /* Free vb2 buffers */ 608 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; 609 ++buffer) { 610 kfree(q->bufs[buffer]); 611 q->bufs[buffer] = NULL; 612 } 613 614 q->num_buffers -= buffers; 615 if (!q->num_buffers) { 616 q->memory = VB2_MEMORY_UNKNOWN; 617 INIT_LIST_HEAD(&q->queued_list); 618 } 619 return 0; 620 } 621 622 bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb) 623 { 624 unsigned int plane; 625 for (plane = 0; plane < vb->num_planes; ++plane) { 626 void *mem_priv = vb->planes[plane].mem_priv; 627 /* 628 * If num_users() has not been provided, call_memop 629 * will return 0, apparently nobody cares about this 630 * case anyway. If num_users() returns more than 1, 631 * we are not the only user of the plane's memory. 632 */ 633 if (mem_priv && call_memop(vb, num_users, mem_priv) > 1) 634 return true; 635 } 636 return false; 637 } 638 EXPORT_SYMBOL(vb2_buffer_in_use); 639 640 /* 641 * __buffers_in_use() - return true if any buffers on the queue are in use and 642 * the queue cannot be freed (by the means of REQBUFS(0)) call 643 */ 644 static bool __buffers_in_use(struct vb2_queue *q) 645 { 646 unsigned int buffer; 647 for (buffer = 0; buffer < q->num_buffers; ++buffer) { 648 if (vb2_buffer_in_use(q, q->bufs[buffer])) 649 return true; 650 } 651 return false; 652 } 653 654 void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb) 655 { 656 call_void_bufop(q, fill_user_buffer, q->bufs[index], pb); 657 } 658 EXPORT_SYMBOL_GPL(vb2_core_querybuf); 659 660 /* 661 * __verify_userptr_ops() - verify that all memory operations required for 662 * USERPTR queue type have been provided 663 */ 664 static int __verify_userptr_ops(struct vb2_queue *q) 665 { 666 if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr || 667 !q->mem_ops->put_userptr) 668 return -EINVAL; 669 670 return 0; 671 } 672 673 /* 674 * __verify_mmap_ops() - verify that all memory operations required for 675 * MMAP queue type have been provided 676 */ 677 static int __verify_mmap_ops(struct vb2_queue *q) 678 { 679 if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc || 680 !q->mem_ops->put || !q->mem_ops->mmap) 681 return -EINVAL; 682 683 return 0; 684 } 685 686 /* 687 * __verify_dmabuf_ops() - verify that all memory operations required for 688 * DMABUF queue type have been provided 689 */ 690 static int __verify_dmabuf_ops(struct vb2_queue *q) 691 { 692 if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf || 693 !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf || 694 !q->mem_ops->unmap_dmabuf) 695 return -EINVAL; 696 697 return 0; 698 } 699 700 int vb2_verify_memory_type(struct vb2_queue *q, 701 enum vb2_memory memory, unsigned int type) 702 { 703 if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR && 704 memory != VB2_MEMORY_DMABUF) { 705 dprintk(q, 1, "unsupported memory type\n"); 706 return -EINVAL; 707 } 708 709 if (type != q->type) { 710 dprintk(q, 1, "requested type is incorrect\n"); 711 return -EINVAL; 712 } 713 714 /* 715 * Make sure all the required memory ops for given memory type 716 * are available. 717 */ 718 if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) { 719 dprintk(q, 1, "MMAP for current setup unsupported\n"); 720 return -EINVAL; 721 } 722 723 if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) { 724 dprintk(q, 1, "USERPTR for current setup unsupported\n"); 725 return -EINVAL; 726 } 727 728 if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { 729 dprintk(q, 1, "DMABUF for current setup unsupported\n"); 730 return -EINVAL; 731 } 732 733 /* 734 * Place the busy tests at the end: -EBUSY can be ignored when 735 * create_bufs is called with count == 0, but count == 0 should still 736 * do the memory and type validation. 737 */ 738 if (vb2_fileio_is_active(q)) { 739 dprintk(q, 1, "file io in progress\n"); 740 return -EBUSY; 741 } 742 return 0; 743 } 744 EXPORT_SYMBOL(vb2_verify_memory_type); 745 746 static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem) 747 { 748 q->non_coherent_mem = 0; 749 750 if (!vb2_queue_allows_cache_hints(q)) 751 return; 752 q->non_coherent_mem = non_coherent_mem; 753 } 754 755 static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem) 756 { 757 if (non_coherent_mem != q->non_coherent_mem) { 758 dprintk(q, 1, "memory coherency model mismatch\n"); 759 return false; 760 } 761 return true; 762 } 763 764 int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, 765 unsigned int flags, unsigned int *count) 766 { 767 unsigned int num_buffers, allocated_buffers, num_planes = 0; 768 unsigned plane_sizes[VB2_MAX_PLANES] = { }; 769 bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT; 770 unsigned int i; 771 int ret; 772 773 if (q->streaming) { 774 dprintk(q, 1, "streaming active\n"); 775 return -EBUSY; 776 } 777 778 if (q->waiting_in_dqbuf && *count) { 779 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); 780 return -EBUSY; 781 } 782 783 if (*count == 0 || q->num_buffers != 0 || 784 (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) || 785 !verify_coherency_flags(q, non_coherent_mem)) { 786 /* 787 * We already have buffers allocated, so first check if they 788 * are not in use and can be freed. 789 */ 790 mutex_lock(&q->mmap_lock); 791 if (debug && q->memory == VB2_MEMORY_MMAP && 792 __buffers_in_use(q)) 793 dprintk(q, 1, "memory in use, orphaning buffers\n"); 794 795 /* 796 * Call queue_cancel to clean up any buffers in the 797 * QUEUED state which is possible if buffers were prepared or 798 * queued without ever calling STREAMON. 799 */ 800 __vb2_queue_cancel(q); 801 ret = __vb2_queue_free(q, q->num_buffers); 802 mutex_unlock(&q->mmap_lock); 803 if (ret) 804 return ret; 805 806 /* 807 * In case of REQBUFS(0) return immediately without calling 808 * driver's queue_setup() callback and allocating resources. 809 */ 810 if (*count == 0) 811 return 0; 812 } 813 814 /* 815 * Make sure the requested values and current defaults are sane. 816 */ 817 WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME); 818 num_buffers = max_t(unsigned int, *count, q->min_buffers_needed); 819 num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME); 820 memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); 821 q->memory = memory; 822 set_queue_coherency(q, non_coherent_mem); 823 824 /* 825 * Ask the driver how many buffers and planes per buffer it requires. 826 * Driver also sets the size and allocator context for each plane. 827 */ 828 ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, 829 plane_sizes, q->alloc_devs); 830 if (ret) 831 return ret; 832 833 /* Check that driver has set sane values */ 834 if (WARN_ON(!num_planes)) 835 return -EINVAL; 836 837 for (i = 0; i < num_planes; i++) 838 if (WARN_ON(!plane_sizes[i])) 839 return -EINVAL; 840 841 /* Finally, allocate buffers and video memory */ 842 allocated_buffers = 843 __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes); 844 if (allocated_buffers == 0) { 845 dprintk(q, 1, "memory allocation failed\n"); 846 return -ENOMEM; 847 } 848 849 /* 850 * There is no point in continuing if we can't allocate the minimum 851 * number of buffers needed by this vb2_queue. 852 */ 853 if (allocated_buffers < q->min_buffers_needed) 854 ret = -ENOMEM; 855 856 /* 857 * Check if driver can handle the allocated number of buffers. 858 */ 859 if (!ret && allocated_buffers < num_buffers) { 860 num_buffers = allocated_buffers; 861 /* 862 * num_planes is set by the previous queue_setup(), but since it 863 * signals to queue_setup() whether it is called from create_bufs() 864 * vs reqbufs() we zero it here to signal that queue_setup() is 865 * called for the reqbufs() case. 866 */ 867 num_planes = 0; 868 869 ret = call_qop(q, queue_setup, q, &num_buffers, 870 &num_planes, plane_sizes, q->alloc_devs); 871 872 if (!ret && allocated_buffers < num_buffers) 873 ret = -ENOMEM; 874 875 /* 876 * Either the driver has accepted a smaller number of buffers, 877 * or .queue_setup() returned an error 878 */ 879 } 880 881 mutex_lock(&q->mmap_lock); 882 q->num_buffers = allocated_buffers; 883 884 if (ret < 0) { 885 /* 886 * Note: __vb2_queue_free() will subtract 'allocated_buffers' 887 * from q->num_buffers. 888 */ 889 __vb2_queue_free(q, allocated_buffers); 890 mutex_unlock(&q->mmap_lock); 891 return ret; 892 } 893 mutex_unlock(&q->mmap_lock); 894 895 /* 896 * Return the number of successfully allocated buffers 897 * to the userspace. 898 */ 899 *count = allocated_buffers; 900 q->waiting_for_buffers = !q->is_output; 901 902 return 0; 903 } 904 EXPORT_SYMBOL_GPL(vb2_core_reqbufs); 905 906 int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, 907 unsigned int flags, unsigned int *count, 908 unsigned int requested_planes, 909 const unsigned int requested_sizes[]) 910 { 911 unsigned int num_planes = 0, num_buffers, allocated_buffers; 912 unsigned plane_sizes[VB2_MAX_PLANES] = { }; 913 bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT; 914 int ret; 915 916 if (q->num_buffers == VB2_MAX_FRAME) { 917 dprintk(q, 1, "maximum number of buffers already allocated\n"); 918 return -ENOBUFS; 919 } 920 921 if (!q->num_buffers) { 922 if (q->waiting_in_dqbuf && *count) { 923 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); 924 return -EBUSY; 925 } 926 memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); 927 q->memory = memory; 928 q->waiting_for_buffers = !q->is_output; 929 set_queue_coherency(q, non_coherent_mem); 930 } else { 931 if (q->memory != memory) { 932 dprintk(q, 1, "memory model mismatch\n"); 933 return -EINVAL; 934 } 935 if (!verify_coherency_flags(q, non_coherent_mem)) 936 return -EINVAL; 937 } 938 939 num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers); 940 941 if (requested_planes && requested_sizes) { 942 num_planes = requested_planes; 943 memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes)); 944 } 945 946 /* 947 * Ask the driver, whether the requested number of buffers, planes per 948 * buffer and their sizes are acceptable 949 */ 950 ret = call_qop(q, queue_setup, q, &num_buffers, 951 &num_planes, plane_sizes, q->alloc_devs); 952 if (ret) 953 return ret; 954 955 /* Finally, allocate buffers and video memory */ 956 allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, 957 num_planes, plane_sizes); 958 if (allocated_buffers == 0) { 959 dprintk(q, 1, "memory allocation failed\n"); 960 return -ENOMEM; 961 } 962 963 /* 964 * Check if driver can handle the so far allocated number of buffers. 965 */ 966 if (allocated_buffers < num_buffers) { 967 num_buffers = allocated_buffers; 968 969 /* 970 * q->num_buffers contains the total number of buffers, that the 971 * queue driver has set up 972 */ 973 ret = call_qop(q, queue_setup, q, &num_buffers, 974 &num_planes, plane_sizes, q->alloc_devs); 975 976 if (!ret && allocated_buffers < num_buffers) 977 ret = -ENOMEM; 978 979 /* 980 * Either the driver has accepted a smaller number of buffers, 981 * or .queue_setup() returned an error 982 */ 983 } 984 985 mutex_lock(&q->mmap_lock); 986 q->num_buffers += allocated_buffers; 987 988 if (ret < 0) { 989 /* 990 * Note: __vb2_queue_free() will subtract 'allocated_buffers' 991 * from q->num_buffers. 992 */ 993 __vb2_queue_free(q, allocated_buffers); 994 mutex_unlock(&q->mmap_lock); 995 return -ENOMEM; 996 } 997 mutex_unlock(&q->mmap_lock); 998 999 /* 1000 * Return the number of successfully allocated buffers 1001 * to the userspace. 1002 */ 1003 *count = allocated_buffers; 1004 1005 return 0; 1006 } 1007 EXPORT_SYMBOL_GPL(vb2_core_create_bufs); 1008 1009 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) 1010 { 1011 if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) 1012 return NULL; 1013 1014 return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv); 1015 1016 } 1017 EXPORT_SYMBOL_GPL(vb2_plane_vaddr); 1018 1019 void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) 1020 { 1021 if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) 1022 return NULL; 1023 1024 return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv); 1025 } 1026 EXPORT_SYMBOL_GPL(vb2_plane_cookie); 1027 1028 void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) 1029 { 1030 struct vb2_queue *q = vb->vb2_queue; 1031 unsigned long flags; 1032 1033 if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE)) 1034 return; 1035 1036 if (WARN_ON(state != VB2_BUF_STATE_DONE && 1037 state != VB2_BUF_STATE_ERROR && 1038 state != VB2_BUF_STATE_QUEUED)) 1039 state = VB2_BUF_STATE_ERROR; 1040 1041 #ifdef CONFIG_VIDEO_ADV_DEBUG 1042 /* 1043 * Although this is not a callback, it still does have to balance 1044 * with the buf_queue op. So update this counter manually. 1045 */ 1046 vb->cnt_buf_done++; 1047 #endif 1048 dprintk(q, 4, "done processing on buffer %d, state: %s\n", 1049 vb->index, vb2_state_name(state)); 1050 1051 if (state != VB2_BUF_STATE_QUEUED) 1052 __vb2_buf_mem_finish(vb); 1053 1054 spin_lock_irqsave(&q->done_lock, flags); 1055 if (state == VB2_BUF_STATE_QUEUED) { 1056 vb->state = VB2_BUF_STATE_QUEUED; 1057 } else { 1058 /* Add the buffer to the done buffers list */ 1059 list_add_tail(&vb->done_entry, &q->done_list); 1060 vb->state = state; 1061 } 1062 atomic_dec(&q->owned_by_drv_count); 1063 1064 if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) { 1065 media_request_object_unbind(&vb->req_obj); 1066 media_request_object_put(&vb->req_obj); 1067 } 1068 1069 spin_unlock_irqrestore(&q->done_lock, flags); 1070 1071 trace_vb2_buf_done(q, vb); 1072 1073 switch (state) { 1074 case VB2_BUF_STATE_QUEUED: 1075 return; 1076 default: 1077 /* Inform any processes that may be waiting for buffers */ 1078 wake_up(&q->done_wq); 1079 break; 1080 } 1081 } 1082 EXPORT_SYMBOL_GPL(vb2_buffer_done); 1083 1084 void vb2_discard_done(struct vb2_queue *q) 1085 { 1086 struct vb2_buffer *vb; 1087 unsigned long flags; 1088 1089 spin_lock_irqsave(&q->done_lock, flags); 1090 list_for_each_entry(vb, &q->done_list, done_entry) 1091 vb->state = VB2_BUF_STATE_ERROR; 1092 spin_unlock_irqrestore(&q->done_lock, flags); 1093 } 1094 EXPORT_SYMBOL_GPL(vb2_discard_done); 1095 1096 /* 1097 * __prepare_mmap() - prepare an MMAP buffer 1098 */ 1099 static int __prepare_mmap(struct vb2_buffer *vb) 1100 { 1101 int ret = 0; 1102 1103 ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1104 vb, vb->planes); 1105 return ret ? ret : call_vb_qop(vb, buf_prepare, vb); 1106 } 1107 1108 /* 1109 * __prepare_userptr() - prepare a USERPTR buffer 1110 */ 1111 static int __prepare_userptr(struct vb2_buffer *vb) 1112 { 1113 struct vb2_plane planes[VB2_MAX_PLANES]; 1114 struct vb2_queue *q = vb->vb2_queue; 1115 void *mem_priv; 1116 unsigned int plane; 1117 int ret = 0; 1118 bool reacquired = vb->planes[0].mem_priv == NULL; 1119 1120 memset(planes, 0, sizeof(planes[0]) * vb->num_planes); 1121 /* Copy relevant information provided by the userspace */ 1122 ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1123 vb, planes); 1124 if (ret) 1125 return ret; 1126 1127 for (plane = 0; plane < vb->num_planes; ++plane) { 1128 /* Skip the plane if already verified */ 1129 if (vb->planes[plane].m.userptr && 1130 vb->planes[plane].m.userptr == planes[plane].m.userptr 1131 && vb->planes[plane].length == planes[plane].length) 1132 continue; 1133 1134 dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n", 1135 plane); 1136 1137 /* Check if the provided plane buffer is large enough */ 1138 if (planes[plane].length < vb->planes[plane].min_length) { 1139 dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n", 1140 planes[plane].length, 1141 vb->planes[plane].min_length, 1142 plane); 1143 ret = -EINVAL; 1144 goto err; 1145 } 1146 1147 /* Release previously acquired memory if present */ 1148 if (vb->planes[plane].mem_priv) { 1149 if (!reacquired) { 1150 reacquired = true; 1151 vb->copied_timestamp = 0; 1152 call_void_vb_qop(vb, buf_cleanup, vb); 1153 } 1154 call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); 1155 } 1156 1157 vb->planes[plane].mem_priv = NULL; 1158 vb->planes[plane].bytesused = 0; 1159 vb->planes[plane].length = 0; 1160 vb->planes[plane].m.userptr = 0; 1161 vb->planes[plane].data_offset = 0; 1162 1163 /* Acquire each plane's memory */ 1164 mem_priv = call_ptr_memop(get_userptr, 1165 vb, 1166 q->alloc_devs[plane] ? : q->dev, 1167 planes[plane].m.userptr, 1168 planes[plane].length); 1169 if (IS_ERR(mem_priv)) { 1170 dprintk(q, 1, "failed acquiring userspace memory for plane %d\n", 1171 plane); 1172 ret = PTR_ERR(mem_priv); 1173 goto err; 1174 } 1175 vb->planes[plane].mem_priv = mem_priv; 1176 } 1177 1178 /* 1179 * Now that everything is in order, copy relevant information 1180 * provided by userspace. 1181 */ 1182 for (plane = 0; plane < vb->num_planes; ++plane) { 1183 vb->planes[plane].bytesused = planes[plane].bytesused; 1184 vb->planes[plane].length = planes[plane].length; 1185 vb->planes[plane].m.userptr = planes[plane].m.userptr; 1186 vb->planes[plane].data_offset = planes[plane].data_offset; 1187 } 1188 1189 if (reacquired) { 1190 /* 1191 * One or more planes changed, so we must call buf_init to do 1192 * the driver-specific initialization on the newly acquired 1193 * buffer, if provided. 1194 */ 1195 ret = call_vb_qop(vb, buf_init, vb); 1196 if (ret) { 1197 dprintk(q, 1, "buffer initialization failed\n"); 1198 goto err; 1199 } 1200 } 1201 1202 ret = call_vb_qop(vb, buf_prepare, vb); 1203 if (ret) { 1204 dprintk(q, 1, "buffer preparation failed\n"); 1205 call_void_vb_qop(vb, buf_cleanup, vb); 1206 goto err; 1207 } 1208 1209 return 0; 1210 err: 1211 /* In case of errors, release planes that were already acquired */ 1212 for (plane = 0; plane < vb->num_planes; ++plane) { 1213 if (vb->planes[plane].mem_priv) 1214 call_void_memop(vb, put_userptr, 1215 vb->planes[plane].mem_priv); 1216 vb->planes[plane].mem_priv = NULL; 1217 vb->planes[plane].m.userptr = 0; 1218 vb->planes[plane].length = 0; 1219 } 1220 1221 return ret; 1222 } 1223 1224 /* 1225 * __prepare_dmabuf() - prepare a DMABUF buffer 1226 */ 1227 static int __prepare_dmabuf(struct vb2_buffer *vb) 1228 { 1229 struct vb2_plane planes[VB2_MAX_PLANES]; 1230 struct vb2_queue *q = vb->vb2_queue; 1231 void *mem_priv; 1232 unsigned int plane; 1233 int ret = 0; 1234 bool reacquired = vb->planes[0].mem_priv == NULL; 1235 1236 memset(planes, 0, sizeof(planes[0]) * vb->num_planes); 1237 /* Copy relevant information provided by the userspace */ 1238 ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1239 vb, planes); 1240 if (ret) 1241 return ret; 1242 1243 for (plane = 0; plane < vb->num_planes; ++plane) { 1244 struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); 1245 1246 if (IS_ERR_OR_NULL(dbuf)) { 1247 dprintk(q, 1, "invalid dmabuf fd for plane %d\n", 1248 plane); 1249 ret = -EINVAL; 1250 goto err; 1251 } 1252 1253 /* use DMABUF size if length is not provided */ 1254 if (planes[plane].length == 0) 1255 planes[plane].length = dbuf->size; 1256 1257 if (planes[plane].length < vb->planes[plane].min_length) { 1258 dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n", 1259 planes[plane].length, plane, 1260 vb->planes[plane].min_length); 1261 dma_buf_put(dbuf); 1262 ret = -EINVAL; 1263 goto err; 1264 } 1265 1266 /* Skip the plane if already verified */ 1267 if (dbuf == vb->planes[plane].dbuf && 1268 vb->planes[plane].length == planes[plane].length) { 1269 dma_buf_put(dbuf); 1270 continue; 1271 } 1272 1273 dprintk(q, 3, "buffer for plane %d changed\n", plane); 1274 1275 if (!reacquired) { 1276 reacquired = true; 1277 vb->copied_timestamp = 0; 1278 call_void_vb_qop(vb, buf_cleanup, vb); 1279 } 1280 1281 /* Release previously acquired memory if present */ 1282 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); 1283 vb->planes[plane].bytesused = 0; 1284 vb->planes[plane].length = 0; 1285 vb->planes[plane].m.fd = 0; 1286 vb->planes[plane].data_offset = 0; 1287 1288 /* Acquire each plane's memory */ 1289 mem_priv = call_ptr_memop(attach_dmabuf, 1290 vb, 1291 q->alloc_devs[plane] ? : q->dev, 1292 dbuf, 1293 planes[plane].length); 1294 if (IS_ERR(mem_priv)) { 1295 dprintk(q, 1, "failed to attach dmabuf\n"); 1296 ret = PTR_ERR(mem_priv); 1297 dma_buf_put(dbuf); 1298 goto err; 1299 } 1300 1301 vb->planes[plane].dbuf = dbuf; 1302 vb->planes[plane].mem_priv = mem_priv; 1303 } 1304 1305 /* 1306 * This pins the buffer(s) with dma_buf_map_attachment()). It's done 1307 * here instead just before the DMA, while queueing the buffer(s) so 1308 * userspace knows sooner rather than later if the dma-buf map fails. 1309 */ 1310 for (plane = 0; plane < vb->num_planes; ++plane) { 1311 if (vb->planes[plane].dbuf_mapped) 1312 continue; 1313 1314 ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv); 1315 if (ret) { 1316 dprintk(q, 1, "failed to map dmabuf for plane %d\n", 1317 plane); 1318 goto err; 1319 } 1320 vb->planes[plane].dbuf_mapped = 1; 1321 } 1322 1323 /* 1324 * Now that everything is in order, copy relevant information 1325 * provided by userspace. 1326 */ 1327 for (plane = 0; plane < vb->num_planes; ++plane) { 1328 vb->planes[plane].bytesused = planes[plane].bytesused; 1329 vb->planes[plane].length = planes[plane].length; 1330 vb->planes[plane].m.fd = planes[plane].m.fd; 1331 vb->planes[plane].data_offset = planes[plane].data_offset; 1332 } 1333 1334 if (reacquired) { 1335 /* 1336 * Call driver-specific initialization on the newly acquired buffer, 1337 * if provided. 1338 */ 1339 ret = call_vb_qop(vb, buf_init, vb); 1340 if (ret) { 1341 dprintk(q, 1, "buffer initialization failed\n"); 1342 goto err; 1343 } 1344 } 1345 1346 ret = call_vb_qop(vb, buf_prepare, vb); 1347 if (ret) { 1348 dprintk(q, 1, "buffer preparation failed\n"); 1349 call_void_vb_qop(vb, buf_cleanup, vb); 1350 goto err; 1351 } 1352 1353 return 0; 1354 err: 1355 /* In case of errors, release planes that were already acquired */ 1356 __vb2_buf_dmabuf_put(vb); 1357 1358 return ret; 1359 } 1360 1361 /* 1362 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing 1363 */ 1364 static void __enqueue_in_driver(struct vb2_buffer *vb) 1365 { 1366 struct vb2_queue *q = vb->vb2_queue; 1367 1368 vb->state = VB2_BUF_STATE_ACTIVE; 1369 atomic_inc(&q->owned_by_drv_count); 1370 1371 trace_vb2_buf_queue(q, vb); 1372 1373 call_void_vb_qop(vb, buf_queue, vb); 1374 } 1375 1376 static int __buf_prepare(struct vb2_buffer *vb) 1377 { 1378 struct vb2_queue *q = vb->vb2_queue; 1379 enum vb2_buffer_state orig_state = vb->state; 1380 int ret; 1381 1382 if (q->error) { 1383 dprintk(q, 1, "fatal error occurred on queue\n"); 1384 return -EIO; 1385 } 1386 1387 if (vb->prepared) 1388 return 0; 1389 WARN_ON(vb->synced); 1390 1391 if (q->is_output) { 1392 ret = call_vb_qop(vb, buf_out_validate, vb); 1393 if (ret) { 1394 dprintk(q, 1, "buffer validation failed\n"); 1395 return ret; 1396 } 1397 } 1398 1399 vb->state = VB2_BUF_STATE_PREPARING; 1400 1401 switch (q->memory) { 1402 case VB2_MEMORY_MMAP: 1403 ret = __prepare_mmap(vb); 1404 break; 1405 case VB2_MEMORY_USERPTR: 1406 ret = __prepare_userptr(vb); 1407 break; 1408 case VB2_MEMORY_DMABUF: 1409 ret = __prepare_dmabuf(vb); 1410 break; 1411 default: 1412 WARN(1, "Invalid queue type\n"); 1413 ret = -EINVAL; 1414 break; 1415 } 1416 1417 if (ret) { 1418 dprintk(q, 1, "buffer preparation failed: %d\n", ret); 1419 vb->state = orig_state; 1420 return ret; 1421 } 1422 1423 __vb2_buf_mem_prepare(vb); 1424 vb->prepared = 1; 1425 vb->state = orig_state; 1426 1427 return 0; 1428 } 1429 1430 static int vb2_req_prepare(struct media_request_object *obj) 1431 { 1432 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1433 int ret; 1434 1435 if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST)) 1436 return -EINVAL; 1437 1438 mutex_lock(vb->vb2_queue->lock); 1439 ret = __buf_prepare(vb); 1440 mutex_unlock(vb->vb2_queue->lock); 1441 return ret; 1442 } 1443 1444 static void __vb2_dqbuf(struct vb2_buffer *vb); 1445 1446 static void vb2_req_unprepare(struct media_request_object *obj) 1447 { 1448 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1449 1450 mutex_lock(vb->vb2_queue->lock); 1451 __vb2_dqbuf(vb); 1452 vb->state = VB2_BUF_STATE_IN_REQUEST; 1453 mutex_unlock(vb->vb2_queue->lock); 1454 WARN_ON(!vb->req_obj.req); 1455 } 1456 1457 int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, 1458 struct media_request *req); 1459 1460 static void vb2_req_queue(struct media_request_object *obj) 1461 { 1462 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1463 int err; 1464 1465 mutex_lock(vb->vb2_queue->lock); 1466 /* 1467 * There is no method to propagate an error from vb2_core_qbuf(), 1468 * so if this returns a non-0 value, then WARN. 1469 * 1470 * The only exception is -EIO which is returned if q->error is 1471 * set. We just ignore that, and expect this will be caught the 1472 * next time vb2_req_prepare() is called. 1473 */ 1474 err = vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL); 1475 WARN_ON_ONCE(err && err != -EIO); 1476 mutex_unlock(vb->vb2_queue->lock); 1477 } 1478 1479 static void vb2_req_unbind(struct media_request_object *obj) 1480 { 1481 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1482 1483 if (vb->state == VB2_BUF_STATE_IN_REQUEST) 1484 call_void_bufop(vb->vb2_queue, init_buffer, vb); 1485 } 1486 1487 static void vb2_req_release(struct media_request_object *obj) 1488 { 1489 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1490 1491 if (vb->state == VB2_BUF_STATE_IN_REQUEST) { 1492 vb->state = VB2_BUF_STATE_DEQUEUED; 1493 if (vb->request) 1494 media_request_put(vb->request); 1495 vb->request = NULL; 1496 } 1497 } 1498 1499 static const struct media_request_object_ops vb2_core_req_ops = { 1500 .prepare = vb2_req_prepare, 1501 .unprepare = vb2_req_unprepare, 1502 .queue = vb2_req_queue, 1503 .unbind = vb2_req_unbind, 1504 .release = vb2_req_release, 1505 }; 1506 1507 bool vb2_request_object_is_buffer(struct media_request_object *obj) 1508 { 1509 return obj->ops == &vb2_core_req_ops; 1510 } 1511 EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer); 1512 1513 unsigned int vb2_request_buffer_cnt(struct media_request *req) 1514 { 1515 struct media_request_object *obj; 1516 unsigned long flags; 1517 unsigned int buffer_cnt = 0; 1518 1519 spin_lock_irqsave(&req->lock, flags); 1520 list_for_each_entry(obj, &req->objects, list) 1521 if (vb2_request_object_is_buffer(obj)) 1522 buffer_cnt++; 1523 spin_unlock_irqrestore(&req->lock, flags); 1524 1525 return buffer_cnt; 1526 } 1527 EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt); 1528 1529 int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb) 1530 { 1531 struct vb2_buffer *vb; 1532 int ret; 1533 1534 vb = q->bufs[index]; 1535 if (vb->state != VB2_BUF_STATE_DEQUEUED) { 1536 dprintk(q, 1, "invalid buffer state %s\n", 1537 vb2_state_name(vb->state)); 1538 return -EINVAL; 1539 } 1540 if (vb->prepared) { 1541 dprintk(q, 1, "buffer already prepared\n"); 1542 return -EINVAL; 1543 } 1544 1545 ret = __buf_prepare(vb); 1546 if (ret) 1547 return ret; 1548 1549 /* Fill buffer information for the userspace */ 1550 call_void_bufop(q, fill_user_buffer, vb, pb); 1551 1552 dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index); 1553 1554 return 0; 1555 } 1556 EXPORT_SYMBOL_GPL(vb2_core_prepare_buf); 1557 1558 /* 1559 * vb2_start_streaming() - Attempt to start streaming. 1560 * @q: videobuf2 queue 1561 * 1562 * Attempt to start streaming. When this function is called there must be 1563 * at least q->min_buffers_needed buffers queued up (i.e. the minimum 1564 * number of buffers required for the DMA engine to function). If the 1565 * @start_streaming op fails it is supposed to return all the driver-owned 1566 * buffers back to vb2 in state QUEUED. Check if that happened and if 1567 * not warn and reclaim them forcefully. 1568 */ 1569 static int vb2_start_streaming(struct vb2_queue *q) 1570 { 1571 struct vb2_buffer *vb; 1572 int ret; 1573 1574 /* 1575 * If any buffers were queued before streamon, 1576 * we can now pass them to driver for processing. 1577 */ 1578 list_for_each_entry(vb, &q->queued_list, queued_entry) 1579 __enqueue_in_driver(vb); 1580 1581 /* Tell the driver to start streaming */ 1582 q->start_streaming_called = 1; 1583 ret = call_qop(q, start_streaming, q, 1584 atomic_read(&q->owned_by_drv_count)); 1585 if (!ret) 1586 return 0; 1587 1588 q->start_streaming_called = 0; 1589 1590 dprintk(q, 1, "driver refused to start streaming\n"); 1591 /* 1592 * If you see this warning, then the driver isn't cleaning up properly 1593 * after a failed start_streaming(). See the start_streaming() 1594 * documentation in videobuf2-core.h for more information how buffers 1595 * should be returned to vb2 in start_streaming(). 1596 */ 1597 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { 1598 unsigned i; 1599 1600 /* 1601 * Forcefully reclaim buffers if the driver did not 1602 * correctly return them to vb2. 1603 */ 1604 for (i = 0; i < q->num_buffers; ++i) { 1605 vb = q->bufs[i]; 1606 if (vb->state == VB2_BUF_STATE_ACTIVE) 1607 vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED); 1608 } 1609 /* Must be zero now */ 1610 WARN_ON(atomic_read(&q->owned_by_drv_count)); 1611 } 1612 /* 1613 * If done_list is not empty, then start_streaming() didn't call 1614 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or 1615 * STATE_DONE. 1616 */ 1617 WARN_ON(!list_empty(&q->done_list)); 1618 return ret; 1619 } 1620 1621 int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, 1622 struct media_request *req) 1623 { 1624 struct vb2_buffer *vb; 1625 enum vb2_buffer_state orig_state; 1626 int ret; 1627 1628 if (q->error) { 1629 dprintk(q, 1, "fatal error occurred on queue\n"); 1630 return -EIO; 1631 } 1632 1633 vb = q->bufs[index]; 1634 1635 if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && 1636 q->requires_requests) { 1637 dprintk(q, 1, "qbuf requires a request\n"); 1638 return -EBADR; 1639 } 1640 1641 if ((req && q->uses_qbuf) || 1642 (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && 1643 q->uses_requests)) { 1644 dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n"); 1645 return -EBUSY; 1646 } 1647 1648 if (req) { 1649 int ret; 1650 1651 q->uses_requests = 1; 1652 if (vb->state != VB2_BUF_STATE_DEQUEUED) { 1653 dprintk(q, 1, "buffer %d not in dequeued state\n", 1654 vb->index); 1655 return -EINVAL; 1656 } 1657 1658 if (q->is_output && !vb->prepared) { 1659 ret = call_vb_qop(vb, buf_out_validate, vb); 1660 if (ret) { 1661 dprintk(q, 1, "buffer validation failed\n"); 1662 return ret; 1663 } 1664 } 1665 1666 media_request_object_init(&vb->req_obj); 1667 1668 /* Make sure the request is in a safe state for updating. */ 1669 ret = media_request_lock_for_update(req); 1670 if (ret) 1671 return ret; 1672 ret = media_request_object_bind(req, &vb2_core_req_ops, 1673 q, true, &vb->req_obj); 1674 media_request_unlock_for_update(req); 1675 if (ret) 1676 return ret; 1677 1678 vb->state = VB2_BUF_STATE_IN_REQUEST; 1679 1680 /* 1681 * Increment the refcount and store the request. 1682 * The request refcount is decremented again when the 1683 * buffer is dequeued. This is to prevent vb2_buffer_done() 1684 * from freeing the request from interrupt context, which can 1685 * happen if the application closed the request fd after 1686 * queueing the request. 1687 */ 1688 media_request_get(req); 1689 vb->request = req; 1690 1691 /* Fill buffer information for the userspace */ 1692 if (pb) { 1693 call_void_bufop(q, copy_timestamp, vb, pb); 1694 call_void_bufop(q, fill_user_buffer, vb, pb); 1695 } 1696 1697 dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); 1698 return 0; 1699 } 1700 1701 if (vb->state != VB2_BUF_STATE_IN_REQUEST) 1702 q->uses_qbuf = 1; 1703 1704 switch (vb->state) { 1705 case VB2_BUF_STATE_DEQUEUED: 1706 case VB2_BUF_STATE_IN_REQUEST: 1707 if (!vb->prepared) { 1708 ret = __buf_prepare(vb); 1709 if (ret) 1710 return ret; 1711 } 1712 break; 1713 case VB2_BUF_STATE_PREPARING: 1714 dprintk(q, 1, "buffer still being prepared\n"); 1715 return -EINVAL; 1716 default: 1717 dprintk(q, 1, "invalid buffer state %s\n", 1718 vb2_state_name(vb->state)); 1719 return -EINVAL; 1720 } 1721 1722 /* 1723 * Add to the queued buffers list, a buffer will stay on it until 1724 * dequeued in dqbuf. 1725 */ 1726 orig_state = vb->state; 1727 list_add_tail(&vb->queued_entry, &q->queued_list); 1728 q->queued_count++; 1729 q->waiting_for_buffers = false; 1730 vb->state = VB2_BUF_STATE_QUEUED; 1731 1732 if (pb) 1733 call_void_bufop(q, copy_timestamp, vb, pb); 1734 1735 trace_vb2_qbuf(q, vb); 1736 1737 /* 1738 * If already streaming, give the buffer to driver for processing. 1739 * If not, the buffer will be given to driver on next streamon. 1740 */ 1741 if (q->start_streaming_called) 1742 __enqueue_in_driver(vb); 1743 1744 /* Fill buffer information for the userspace */ 1745 if (pb) 1746 call_void_bufop(q, fill_user_buffer, vb, pb); 1747 1748 /* 1749 * If streamon has been called, and we haven't yet called 1750 * start_streaming() since not enough buffers were queued, and 1751 * we now have reached the minimum number of queued buffers, 1752 * then we can finally call start_streaming(). 1753 */ 1754 if (q->streaming && !q->start_streaming_called && 1755 q->queued_count >= q->min_buffers_needed) { 1756 ret = vb2_start_streaming(q); 1757 if (ret) { 1758 /* 1759 * Since vb2_core_qbuf will return with an error, 1760 * we should return it to state DEQUEUED since 1761 * the error indicates that the buffer wasn't queued. 1762 */ 1763 list_del(&vb->queued_entry); 1764 q->queued_count--; 1765 vb->state = orig_state; 1766 return ret; 1767 } 1768 } 1769 1770 dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); 1771 return 0; 1772 } 1773 EXPORT_SYMBOL_GPL(vb2_core_qbuf); 1774 1775 /* 1776 * __vb2_wait_for_done_vb() - wait for a buffer to become available 1777 * for dequeuing 1778 * 1779 * Will sleep if required for nonblocking == false. 1780 */ 1781 static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) 1782 { 1783 /* 1784 * All operations on vb_done_list are performed under done_lock 1785 * spinlock protection. However, buffers may be removed from 1786 * it and returned to userspace only while holding both driver's 1787 * lock and the done_lock spinlock. Thus we can be sure that as 1788 * long as we hold the driver's lock, the list will remain not 1789 * empty if list_empty() check succeeds. 1790 */ 1791 1792 for (;;) { 1793 int ret; 1794 1795 if (q->waiting_in_dqbuf) { 1796 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); 1797 return -EBUSY; 1798 } 1799 1800 if (!q->streaming) { 1801 dprintk(q, 1, "streaming off, will not wait for buffers\n"); 1802 return -EINVAL; 1803 } 1804 1805 if (q->error) { 1806 dprintk(q, 1, "Queue in error state, will not wait for buffers\n"); 1807 return -EIO; 1808 } 1809 1810 if (q->last_buffer_dequeued) { 1811 dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n"); 1812 return -EPIPE; 1813 } 1814 1815 if (!list_empty(&q->done_list)) { 1816 /* 1817 * Found a buffer that we were waiting for. 1818 */ 1819 break; 1820 } 1821 1822 if (nonblocking) { 1823 dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n"); 1824 return -EAGAIN; 1825 } 1826 1827 q->waiting_in_dqbuf = 1; 1828 /* 1829 * We are streaming and blocking, wait for another buffer to 1830 * become ready or for streamoff. Driver's lock is released to 1831 * allow streamoff or qbuf to be called while waiting. 1832 */ 1833 call_void_qop(q, wait_prepare, q); 1834 1835 /* 1836 * All locks have been released, it is safe to sleep now. 1837 */ 1838 dprintk(q, 3, "will sleep waiting for buffers\n"); 1839 ret = wait_event_interruptible(q->done_wq, 1840 !list_empty(&q->done_list) || !q->streaming || 1841 q->error); 1842 1843 /* 1844 * We need to reevaluate both conditions again after reacquiring 1845 * the locks or return an error if one occurred. 1846 */ 1847 call_void_qop(q, wait_finish, q); 1848 q->waiting_in_dqbuf = 0; 1849 if (ret) { 1850 dprintk(q, 1, "sleep was interrupted\n"); 1851 return ret; 1852 } 1853 } 1854 return 0; 1855 } 1856 1857 /* 1858 * __vb2_get_done_vb() - get a buffer ready for dequeuing 1859 * 1860 * Will sleep if required for nonblocking == false. 1861 */ 1862 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, 1863 void *pb, int nonblocking) 1864 { 1865 unsigned long flags; 1866 int ret = 0; 1867 1868 /* 1869 * Wait for at least one buffer to become available on the done_list. 1870 */ 1871 ret = __vb2_wait_for_done_vb(q, nonblocking); 1872 if (ret) 1873 return ret; 1874 1875 /* 1876 * Driver's lock has been held since we last verified that done_list 1877 * is not empty, so no need for another list_empty(done_list) check. 1878 */ 1879 spin_lock_irqsave(&q->done_lock, flags); 1880 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); 1881 /* 1882 * Only remove the buffer from done_list if all planes can be 1883 * handled. Some cases such as V4L2 file I/O and DVB have pb 1884 * == NULL; skip the check then as there's nothing to verify. 1885 */ 1886 if (pb) 1887 ret = call_bufop(q, verify_planes_array, *vb, pb); 1888 if (!ret) 1889 list_del(&(*vb)->done_entry); 1890 spin_unlock_irqrestore(&q->done_lock, flags); 1891 1892 return ret; 1893 } 1894 1895 int vb2_wait_for_all_buffers(struct vb2_queue *q) 1896 { 1897 if (!q->streaming) { 1898 dprintk(q, 1, "streaming off, will not wait for buffers\n"); 1899 return -EINVAL; 1900 } 1901 1902 if (q->start_streaming_called) 1903 wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count)); 1904 return 0; 1905 } 1906 EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); 1907 1908 /* 1909 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state 1910 */ 1911 static void __vb2_dqbuf(struct vb2_buffer *vb) 1912 { 1913 struct vb2_queue *q = vb->vb2_queue; 1914 1915 /* nothing to do if the buffer is already dequeued */ 1916 if (vb->state == VB2_BUF_STATE_DEQUEUED) 1917 return; 1918 1919 vb->state = VB2_BUF_STATE_DEQUEUED; 1920 1921 call_void_bufop(q, init_buffer, vb); 1922 } 1923 1924 int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, 1925 bool nonblocking) 1926 { 1927 struct vb2_buffer *vb = NULL; 1928 int ret; 1929 1930 ret = __vb2_get_done_vb(q, &vb, pb, nonblocking); 1931 if (ret < 0) 1932 return ret; 1933 1934 switch (vb->state) { 1935 case VB2_BUF_STATE_DONE: 1936 dprintk(q, 3, "returning done buffer\n"); 1937 break; 1938 case VB2_BUF_STATE_ERROR: 1939 dprintk(q, 3, "returning done buffer with errors\n"); 1940 break; 1941 default: 1942 dprintk(q, 1, "invalid buffer state %s\n", 1943 vb2_state_name(vb->state)); 1944 return -EINVAL; 1945 } 1946 1947 call_void_vb_qop(vb, buf_finish, vb); 1948 vb->prepared = 0; 1949 1950 if (pindex) 1951 *pindex = vb->index; 1952 1953 /* Fill buffer information for the userspace */ 1954 if (pb) 1955 call_void_bufop(q, fill_user_buffer, vb, pb); 1956 1957 /* Remove from vb2 queue */ 1958 list_del(&vb->queued_entry); 1959 q->queued_count--; 1960 1961 trace_vb2_dqbuf(q, vb); 1962 1963 /* go back to dequeued state */ 1964 __vb2_dqbuf(vb); 1965 1966 if (WARN_ON(vb->req_obj.req)) { 1967 media_request_object_unbind(&vb->req_obj); 1968 media_request_object_put(&vb->req_obj); 1969 } 1970 if (vb->request) 1971 media_request_put(vb->request); 1972 vb->request = NULL; 1973 1974 dprintk(q, 2, "dqbuf of buffer %d, state: %s\n", 1975 vb->index, vb2_state_name(vb->state)); 1976 1977 return 0; 1978 1979 } 1980 EXPORT_SYMBOL_GPL(vb2_core_dqbuf); 1981 1982 /* 1983 * __vb2_queue_cancel() - cancel and stop (pause) streaming 1984 * 1985 * Removes all queued buffers from driver's queue and all buffers queued by 1986 * userspace from vb2's queue. Returns to state after reqbufs. 1987 */ 1988 static void __vb2_queue_cancel(struct vb2_queue *q) 1989 { 1990 unsigned int i; 1991 1992 /* 1993 * Tell driver to stop all transactions and release all queued 1994 * buffers. 1995 */ 1996 if (q->start_streaming_called) 1997 call_void_qop(q, stop_streaming, q); 1998 1999 if (q->streaming) 2000 call_void_qop(q, unprepare_streaming, q); 2001 2002 /* 2003 * If you see this warning, then the driver isn't cleaning up properly 2004 * in stop_streaming(). See the stop_streaming() documentation in 2005 * videobuf2-core.h for more information how buffers should be returned 2006 * to vb2 in stop_streaming(). 2007 */ 2008 if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { 2009 for (i = 0; i < q->num_buffers; ++i) 2010 if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) { 2011 pr_warn("driver bug: stop_streaming operation is leaving buf %p in active state\n", 2012 q->bufs[i]); 2013 vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR); 2014 } 2015 /* Must be zero now */ 2016 WARN_ON(atomic_read(&q->owned_by_drv_count)); 2017 } 2018 2019 q->streaming = 0; 2020 q->start_streaming_called = 0; 2021 q->queued_count = 0; 2022 q->error = 0; 2023 q->uses_requests = 0; 2024 q->uses_qbuf = 0; 2025 2026 /* 2027 * Remove all buffers from vb2's list... 2028 */ 2029 INIT_LIST_HEAD(&q->queued_list); 2030 /* 2031 * ...and done list; userspace will not receive any buffers it 2032 * has not already dequeued before initiating cancel. 2033 */ 2034 INIT_LIST_HEAD(&q->done_list); 2035 atomic_set(&q->owned_by_drv_count, 0); 2036 wake_up_all(&q->done_wq); 2037 2038 /* 2039 * Reinitialize all buffers for next use. 2040 * Make sure to call buf_finish for any queued buffers. Normally 2041 * that's done in dqbuf, but that's not going to happen when we 2042 * cancel the whole queue. Note: this code belongs here, not in 2043 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical 2044 * call to __fill_user_buffer() after buf_finish(). That order can't 2045 * be changed, so we can't move the buf_finish() to __vb2_dqbuf(). 2046 */ 2047 for (i = 0; i < q->num_buffers; ++i) { 2048 struct vb2_buffer *vb = q->bufs[i]; 2049 struct media_request *req = vb->req_obj.req; 2050 2051 /* 2052 * If a request is associated with this buffer, then 2053 * call buf_request_cancel() to give the driver to complete() 2054 * related request objects. Otherwise those objects would 2055 * never complete. 2056 */ 2057 if (req) { 2058 enum media_request_state state; 2059 unsigned long flags; 2060 2061 spin_lock_irqsave(&req->lock, flags); 2062 state = req->state; 2063 spin_unlock_irqrestore(&req->lock, flags); 2064 2065 if (state == MEDIA_REQUEST_STATE_QUEUED) 2066 call_void_vb_qop(vb, buf_request_complete, vb); 2067 } 2068 2069 __vb2_buf_mem_finish(vb); 2070 2071 if (vb->prepared) { 2072 call_void_vb_qop(vb, buf_finish, vb); 2073 vb->prepared = 0; 2074 } 2075 __vb2_dqbuf(vb); 2076 2077 if (vb->req_obj.req) { 2078 media_request_object_unbind(&vb->req_obj); 2079 media_request_object_put(&vb->req_obj); 2080 } 2081 if (vb->request) 2082 media_request_put(vb->request); 2083 vb->request = NULL; 2084 vb->copied_timestamp = 0; 2085 } 2086 } 2087 2088 int vb2_core_streamon(struct vb2_queue *q, unsigned int type) 2089 { 2090 int ret; 2091 2092 if (type != q->type) { 2093 dprintk(q, 1, "invalid stream type\n"); 2094 return -EINVAL; 2095 } 2096 2097 if (q->streaming) { 2098 dprintk(q, 3, "already streaming\n"); 2099 return 0; 2100 } 2101 2102 if (!q->num_buffers) { 2103 dprintk(q, 1, "no buffers have been allocated\n"); 2104 return -EINVAL; 2105 } 2106 2107 if (q->num_buffers < q->min_buffers_needed) { 2108 dprintk(q, 1, "need at least %u allocated buffers\n", 2109 q->min_buffers_needed); 2110 return -EINVAL; 2111 } 2112 2113 ret = call_qop(q, prepare_streaming, q); 2114 if (ret) 2115 return ret; 2116 2117 q->streaming = 1; 2118 2119 /* 2120 * Tell driver to start streaming provided sufficient buffers 2121 * are available. 2122 */ 2123 if (q->queued_count >= q->min_buffers_needed) { 2124 ret = vb2_start_streaming(q); 2125 if (ret) 2126 goto unprepare; 2127 } 2128 2129 dprintk(q, 3, "successful\n"); 2130 return 0; 2131 2132 unprepare: 2133 call_void_qop(q, unprepare_streaming, q); 2134 q->streaming = 0; 2135 return ret; 2136 } 2137 EXPORT_SYMBOL_GPL(vb2_core_streamon); 2138 2139 void vb2_queue_error(struct vb2_queue *q) 2140 { 2141 q->error = 1; 2142 2143 wake_up_all(&q->done_wq); 2144 } 2145 EXPORT_SYMBOL_GPL(vb2_queue_error); 2146 2147 int vb2_core_streamoff(struct vb2_queue *q, unsigned int type) 2148 { 2149 if (type != q->type) { 2150 dprintk(q, 1, "invalid stream type\n"); 2151 return -EINVAL; 2152 } 2153 2154 /* 2155 * Cancel will pause streaming and remove all buffers from the driver 2156 * and vb2, effectively returning control over them to userspace. 2157 * 2158 * Note that we do this even if q->streaming == 0: if you prepare or 2159 * queue buffers, and then call streamoff without ever having called 2160 * streamon, you would still expect those buffers to be returned to 2161 * their normal dequeued state. 2162 */ 2163 __vb2_queue_cancel(q); 2164 q->waiting_for_buffers = !q->is_output; 2165 q->last_buffer_dequeued = false; 2166 2167 dprintk(q, 3, "successful\n"); 2168 return 0; 2169 } 2170 EXPORT_SYMBOL_GPL(vb2_core_streamoff); 2171 2172 /* 2173 * __find_plane_by_offset() - find plane associated with the given offset off 2174 */ 2175 static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off, 2176 unsigned int *_buffer, unsigned int *_plane) 2177 { 2178 struct vb2_buffer *vb; 2179 unsigned int buffer, plane; 2180 2181 /* 2182 * Go over all buffers and their planes, comparing the given offset 2183 * with an offset assigned to each plane. If a match is found, 2184 * return its buffer and plane numbers. 2185 */ 2186 for (buffer = 0; buffer < q->num_buffers; ++buffer) { 2187 vb = q->bufs[buffer]; 2188 2189 for (plane = 0; plane < vb->num_planes; ++plane) { 2190 if (vb->planes[plane].m.offset == off) { 2191 *_buffer = buffer; 2192 *_plane = plane; 2193 return 0; 2194 } 2195 } 2196 } 2197 2198 return -EINVAL; 2199 } 2200 2201 int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, 2202 unsigned int index, unsigned int plane, unsigned int flags) 2203 { 2204 struct vb2_buffer *vb = NULL; 2205 struct vb2_plane *vb_plane; 2206 int ret; 2207 struct dma_buf *dbuf; 2208 2209 if (q->memory != VB2_MEMORY_MMAP) { 2210 dprintk(q, 1, "queue is not currently set up for mmap\n"); 2211 return -EINVAL; 2212 } 2213 2214 if (!q->mem_ops->get_dmabuf) { 2215 dprintk(q, 1, "queue does not support DMA buffer exporting\n"); 2216 return -EINVAL; 2217 } 2218 2219 if (flags & ~(O_CLOEXEC | O_ACCMODE)) { 2220 dprintk(q, 1, "queue does support only O_CLOEXEC and access mode flags\n"); 2221 return -EINVAL; 2222 } 2223 2224 if (type != q->type) { 2225 dprintk(q, 1, "invalid buffer type\n"); 2226 return -EINVAL; 2227 } 2228 2229 if (index >= q->num_buffers) { 2230 dprintk(q, 1, "buffer index out of range\n"); 2231 return -EINVAL; 2232 } 2233 2234 vb = q->bufs[index]; 2235 2236 if (plane >= vb->num_planes) { 2237 dprintk(q, 1, "buffer plane out of range\n"); 2238 return -EINVAL; 2239 } 2240 2241 if (vb2_fileio_is_active(q)) { 2242 dprintk(q, 1, "expbuf: file io in progress\n"); 2243 return -EBUSY; 2244 } 2245 2246 vb_plane = &vb->planes[plane]; 2247 2248 dbuf = call_ptr_memop(get_dmabuf, 2249 vb, 2250 vb_plane->mem_priv, 2251 flags & O_ACCMODE); 2252 if (IS_ERR_OR_NULL(dbuf)) { 2253 dprintk(q, 1, "failed to export buffer %d, plane %d\n", 2254 index, plane); 2255 return -EINVAL; 2256 } 2257 2258 ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE); 2259 if (ret < 0) { 2260 dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n", 2261 index, plane, ret); 2262 dma_buf_put(dbuf); 2263 return ret; 2264 } 2265 2266 dprintk(q, 3, "buffer %d, plane %d exported as %d descriptor\n", 2267 index, plane, ret); 2268 *fd = ret; 2269 2270 return 0; 2271 } 2272 EXPORT_SYMBOL_GPL(vb2_core_expbuf); 2273 2274 int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) 2275 { 2276 unsigned long off = vma->vm_pgoff << PAGE_SHIFT; 2277 struct vb2_buffer *vb; 2278 unsigned int buffer = 0, plane = 0; 2279 int ret; 2280 unsigned long length; 2281 2282 if (q->memory != VB2_MEMORY_MMAP) { 2283 dprintk(q, 1, "queue is not currently set up for mmap\n"); 2284 return -EINVAL; 2285 } 2286 2287 /* 2288 * Check memory area access mode. 2289 */ 2290 if (!(vma->vm_flags & VM_SHARED)) { 2291 dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n"); 2292 return -EINVAL; 2293 } 2294 if (q->is_output) { 2295 if (!(vma->vm_flags & VM_WRITE)) { 2296 dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n"); 2297 return -EINVAL; 2298 } 2299 } else { 2300 if (!(vma->vm_flags & VM_READ)) { 2301 dprintk(q, 1, "invalid vma flags, VM_READ needed\n"); 2302 return -EINVAL; 2303 } 2304 } 2305 2306 mutex_lock(&q->mmap_lock); 2307 2308 if (vb2_fileio_is_active(q)) { 2309 dprintk(q, 1, "mmap: file io in progress\n"); 2310 ret = -EBUSY; 2311 goto unlock; 2312 } 2313 2314 /* 2315 * Find the plane corresponding to the offset passed by userspace. 2316 */ 2317 ret = __find_plane_by_offset(q, off, &buffer, &plane); 2318 if (ret) 2319 goto unlock; 2320 2321 vb = q->bufs[buffer]; 2322 2323 /* 2324 * MMAP requires page_aligned buffers. 2325 * The buffer length was page_aligned at __vb2_buf_mem_alloc(), 2326 * so, we need to do the same here. 2327 */ 2328 length = PAGE_ALIGN(vb->planes[plane].length); 2329 if (length < (vma->vm_end - vma->vm_start)) { 2330 dprintk(q, 1, 2331 "MMAP invalid, as it would overflow buffer length\n"); 2332 ret = -EINVAL; 2333 goto unlock; 2334 } 2335 2336 /* 2337 * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer, 2338 * not as a in-buffer offset. We always want to mmap a whole buffer 2339 * from its beginning. 2340 */ 2341 vma->vm_pgoff = 0; 2342 2343 ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma); 2344 2345 unlock: 2346 mutex_unlock(&q->mmap_lock); 2347 if (ret) 2348 return ret; 2349 2350 dprintk(q, 3, "buffer %d, plane %d successfully mapped\n", buffer, plane); 2351 return 0; 2352 } 2353 EXPORT_SYMBOL_GPL(vb2_mmap); 2354 2355 #ifndef CONFIG_MMU 2356 unsigned long vb2_get_unmapped_area(struct vb2_queue *q, 2357 unsigned long addr, 2358 unsigned long len, 2359 unsigned long pgoff, 2360 unsigned long flags) 2361 { 2362 unsigned long off = pgoff << PAGE_SHIFT; 2363 struct vb2_buffer *vb; 2364 unsigned int buffer, plane; 2365 void *vaddr; 2366 int ret; 2367 2368 if (q->memory != VB2_MEMORY_MMAP) { 2369 dprintk(q, 1, "queue is not currently set up for mmap\n"); 2370 return -EINVAL; 2371 } 2372 2373 /* 2374 * Find the plane corresponding to the offset passed by userspace. 2375 */ 2376 ret = __find_plane_by_offset(q, off, &buffer, &plane); 2377 if (ret) 2378 return ret; 2379 2380 vb = q->bufs[buffer]; 2381 2382 vaddr = vb2_plane_vaddr(vb, plane); 2383 return vaddr ? (unsigned long)vaddr : -EINVAL; 2384 } 2385 EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); 2386 #endif 2387 2388 int vb2_core_queue_init(struct vb2_queue *q) 2389 { 2390 /* 2391 * Sanity check 2392 */ 2393 if (WARN_ON(!q) || 2394 WARN_ON(!q->ops) || 2395 WARN_ON(!q->mem_ops) || 2396 WARN_ON(!q->type) || 2397 WARN_ON(!q->io_modes) || 2398 WARN_ON(!q->ops->queue_setup) || 2399 WARN_ON(!q->ops->buf_queue)) 2400 return -EINVAL; 2401 2402 if (WARN_ON(q->requires_requests && !q->supports_requests)) 2403 return -EINVAL; 2404 2405 /* 2406 * This combination is not allowed since a non-zero value of 2407 * q->min_buffers_needed can cause vb2_core_qbuf() to fail if 2408 * it has to call start_streaming(), and the Request API expects 2409 * that queueing a request (and thus queueing a buffer contained 2410 * in that request) will always succeed. There is no method of 2411 * propagating an error back to userspace. 2412 */ 2413 if (WARN_ON(q->supports_requests && q->min_buffers_needed)) 2414 return -EINVAL; 2415 2416 INIT_LIST_HEAD(&q->queued_list); 2417 INIT_LIST_HEAD(&q->done_list); 2418 spin_lock_init(&q->done_lock); 2419 mutex_init(&q->mmap_lock); 2420 init_waitqueue_head(&q->done_wq); 2421 2422 q->memory = VB2_MEMORY_UNKNOWN; 2423 2424 if (q->buf_struct_size == 0) 2425 q->buf_struct_size = sizeof(struct vb2_buffer); 2426 2427 if (q->bidirectional) 2428 q->dma_dir = DMA_BIDIRECTIONAL; 2429 else 2430 q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 2431 2432 if (q->name[0] == '\0') 2433 snprintf(q->name, sizeof(q->name), "%s-%p", 2434 q->is_output ? "out" : "cap", q); 2435 2436 return 0; 2437 } 2438 EXPORT_SYMBOL_GPL(vb2_core_queue_init); 2439 2440 static int __vb2_init_fileio(struct vb2_queue *q, int read); 2441 static int __vb2_cleanup_fileio(struct vb2_queue *q); 2442 void vb2_core_queue_release(struct vb2_queue *q) 2443 { 2444 __vb2_cleanup_fileio(q); 2445 __vb2_queue_cancel(q); 2446 mutex_lock(&q->mmap_lock); 2447 __vb2_queue_free(q, q->num_buffers); 2448 mutex_unlock(&q->mmap_lock); 2449 } 2450 EXPORT_SYMBOL_GPL(vb2_core_queue_release); 2451 2452 __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, 2453 poll_table *wait) 2454 { 2455 __poll_t req_events = poll_requested_events(wait); 2456 struct vb2_buffer *vb = NULL; 2457 unsigned long flags; 2458 2459 /* 2460 * poll_wait() MUST be called on the first invocation on all the 2461 * potential queues of interest, even if we are not interested in their 2462 * events during this first call. Failure to do so will result in 2463 * queue's events to be ignored because the poll_table won't be capable 2464 * of adding new wait queues thereafter. 2465 */ 2466 poll_wait(file, &q->done_wq, wait); 2467 2468 if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM))) 2469 return 0; 2470 if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM))) 2471 return 0; 2472 2473 /* 2474 * Start file I/O emulator only if streaming API has not been used yet. 2475 */ 2476 if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) { 2477 if (!q->is_output && (q->io_modes & VB2_READ) && 2478 (req_events & (EPOLLIN | EPOLLRDNORM))) { 2479 if (__vb2_init_fileio(q, 1)) 2480 return EPOLLERR; 2481 } 2482 if (q->is_output && (q->io_modes & VB2_WRITE) && 2483 (req_events & (EPOLLOUT | EPOLLWRNORM))) { 2484 if (__vb2_init_fileio(q, 0)) 2485 return EPOLLERR; 2486 /* 2487 * Write to OUTPUT queue can be done immediately. 2488 */ 2489 return EPOLLOUT | EPOLLWRNORM; 2490 } 2491 } 2492 2493 /* 2494 * There is nothing to wait for if the queue isn't streaming, or if the 2495 * error flag is set. 2496 */ 2497 if (!vb2_is_streaming(q) || q->error) 2498 return EPOLLERR; 2499 2500 /* 2501 * If this quirk is set and QBUF hasn't been called yet then 2502 * return EPOLLERR as well. This only affects capture queues, output 2503 * queues will always initialize waiting_for_buffers to false. 2504 * This quirk is set by V4L2 for backwards compatibility reasons. 2505 */ 2506 if (q->quirk_poll_must_check_waiting_for_buffers && 2507 q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM))) 2508 return EPOLLERR; 2509 2510 /* 2511 * For output streams you can call write() as long as there are fewer 2512 * buffers queued than there are buffers available. 2513 */ 2514 if (q->is_output && q->fileio && q->queued_count < q->num_buffers) 2515 return EPOLLOUT | EPOLLWRNORM; 2516 2517 if (list_empty(&q->done_list)) { 2518 /* 2519 * If the last buffer was dequeued from a capture queue, 2520 * return immediately. DQBUF will return -EPIPE. 2521 */ 2522 if (q->last_buffer_dequeued) 2523 return EPOLLIN | EPOLLRDNORM; 2524 } 2525 2526 /* 2527 * Take first buffer available for dequeuing. 2528 */ 2529 spin_lock_irqsave(&q->done_lock, flags); 2530 if (!list_empty(&q->done_list)) 2531 vb = list_first_entry(&q->done_list, struct vb2_buffer, 2532 done_entry); 2533 spin_unlock_irqrestore(&q->done_lock, flags); 2534 2535 if (vb && (vb->state == VB2_BUF_STATE_DONE 2536 || vb->state == VB2_BUF_STATE_ERROR)) { 2537 return (q->is_output) ? 2538 EPOLLOUT | EPOLLWRNORM : 2539 EPOLLIN | EPOLLRDNORM; 2540 } 2541 return 0; 2542 } 2543 EXPORT_SYMBOL_GPL(vb2_core_poll); 2544 2545 /* 2546 * struct vb2_fileio_buf - buffer context used by file io emulator 2547 * 2548 * vb2 provides a compatibility layer and emulator of file io (read and 2549 * write) calls on top of streaming API. This structure is used for 2550 * tracking context related to the buffers. 2551 */ 2552 struct vb2_fileio_buf { 2553 void *vaddr; 2554 unsigned int size; 2555 unsigned int pos; 2556 unsigned int queued:1; 2557 }; 2558 2559 /* 2560 * struct vb2_fileio_data - queue context used by file io emulator 2561 * 2562 * @cur_index: the index of the buffer currently being read from or 2563 * written to. If equal to q->num_buffers then a new buffer 2564 * must be dequeued. 2565 * @initial_index: in the read() case all buffers are queued up immediately 2566 * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles 2567 * buffers. However, in the write() case no buffers are initially 2568 * queued, instead whenever a buffer is full it is queued up by 2569 * __vb2_perform_fileio(). Only once all available buffers have 2570 * been queued up will __vb2_perform_fileio() start to dequeue 2571 * buffers. This means that initially __vb2_perform_fileio() 2572 * needs to know what buffer index to use when it is queuing up 2573 * the buffers for the first time. That initial index is stored 2574 * in this field. Once it is equal to q->num_buffers all 2575 * available buffers have been queued and __vb2_perform_fileio() 2576 * should start the normal dequeue/queue cycle. 2577 * 2578 * vb2 provides a compatibility layer and emulator of file io (read and 2579 * write) calls on top of streaming API. For proper operation it required 2580 * this structure to save the driver state between each call of the read 2581 * or write function. 2582 */ 2583 struct vb2_fileio_data { 2584 unsigned int count; 2585 unsigned int type; 2586 unsigned int memory; 2587 struct vb2_fileio_buf bufs[VB2_MAX_FRAME]; 2588 unsigned int cur_index; 2589 unsigned int initial_index; 2590 unsigned int q_count; 2591 unsigned int dq_count; 2592 unsigned read_once:1; 2593 unsigned write_immediately:1; 2594 }; 2595 2596 /* 2597 * __vb2_init_fileio() - initialize file io emulator 2598 * @q: videobuf2 queue 2599 * @read: mode selector (1 means read, 0 means write) 2600 */ 2601 static int __vb2_init_fileio(struct vb2_queue *q, int read) 2602 { 2603 struct vb2_fileio_data *fileio; 2604 int i, ret; 2605 unsigned int count = 0; 2606 2607 /* 2608 * Sanity check 2609 */ 2610 if (WARN_ON((read && !(q->io_modes & VB2_READ)) || 2611 (!read && !(q->io_modes & VB2_WRITE)))) 2612 return -EINVAL; 2613 2614 /* 2615 * Check if device supports mapping buffers to kernel virtual space. 2616 */ 2617 if (!q->mem_ops->vaddr) 2618 return -EBUSY; 2619 2620 /* 2621 * Check if streaming api has not been already activated. 2622 */ 2623 if (q->streaming || q->num_buffers > 0) 2624 return -EBUSY; 2625 2626 /* 2627 * Start with count 1, driver can increase it in queue_setup() 2628 */ 2629 count = 1; 2630 2631 dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n", 2632 (read) ? "read" : "write", count, q->fileio_read_once, 2633 q->fileio_write_immediately); 2634 2635 fileio = kzalloc(sizeof(*fileio), GFP_KERNEL); 2636 if (fileio == NULL) 2637 return -ENOMEM; 2638 2639 fileio->read_once = q->fileio_read_once; 2640 fileio->write_immediately = q->fileio_write_immediately; 2641 2642 /* 2643 * Request buffers and use MMAP type to force driver 2644 * to allocate buffers by itself. 2645 */ 2646 fileio->count = count; 2647 fileio->memory = VB2_MEMORY_MMAP; 2648 fileio->type = q->type; 2649 q->fileio = fileio; 2650 ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); 2651 if (ret) 2652 goto err_kfree; 2653 2654 /* 2655 * Check if plane_count is correct 2656 * (multiplane buffers are not supported). 2657 */ 2658 if (q->bufs[0]->num_planes != 1) { 2659 ret = -EBUSY; 2660 goto err_reqbufs; 2661 } 2662 2663 /* 2664 * Get kernel address of each buffer. 2665 */ 2666 for (i = 0; i < q->num_buffers; i++) { 2667 fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0); 2668 if (fileio->bufs[i].vaddr == NULL) { 2669 ret = -EINVAL; 2670 goto err_reqbufs; 2671 } 2672 fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0); 2673 } 2674 2675 /* 2676 * Read mode requires pre queuing of all buffers. 2677 */ 2678 if (read) { 2679 /* 2680 * Queue all buffers. 2681 */ 2682 for (i = 0; i < q->num_buffers; i++) { 2683 ret = vb2_core_qbuf(q, i, NULL, NULL); 2684 if (ret) 2685 goto err_reqbufs; 2686 fileio->bufs[i].queued = 1; 2687 } 2688 /* 2689 * All buffers have been queued, so mark that by setting 2690 * initial_index to q->num_buffers 2691 */ 2692 fileio->initial_index = q->num_buffers; 2693 fileio->cur_index = q->num_buffers; 2694 } 2695 2696 /* 2697 * Start streaming. 2698 */ 2699 ret = vb2_core_streamon(q, q->type); 2700 if (ret) 2701 goto err_reqbufs; 2702 2703 return ret; 2704 2705 err_reqbufs: 2706 fileio->count = 0; 2707 vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); 2708 2709 err_kfree: 2710 q->fileio = NULL; 2711 kfree(fileio); 2712 return ret; 2713 } 2714 2715 /* 2716 * __vb2_cleanup_fileio() - free resourced used by file io emulator 2717 * @q: videobuf2 queue 2718 */ 2719 static int __vb2_cleanup_fileio(struct vb2_queue *q) 2720 { 2721 struct vb2_fileio_data *fileio = q->fileio; 2722 2723 if (fileio) { 2724 vb2_core_streamoff(q, q->type); 2725 q->fileio = NULL; 2726 fileio->count = 0; 2727 vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); 2728 kfree(fileio); 2729 dprintk(q, 3, "file io emulator closed\n"); 2730 } 2731 return 0; 2732 } 2733 2734 /* 2735 * __vb2_perform_fileio() - perform a single file io (read or write) operation 2736 * @q: videobuf2 queue 2737 * @data: pointed to target userspace buffer 2738 * @count: number of bytes to read or write 2739 * @ppos: file handle position tracking pointer 2740 * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking) 2741 * @read: access mode selector (1 means read, 0 means write) 2742 */ 2743 static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count, 2744 loff_t *ppos, int nonblock, int read) 2745 { 2746 struct vb2_fileio_data *fileio; 2747 struct vb2_fileio_buf *buf; 2748 bool is_multiplanar = q->is_multiplanar; 2749 /* 2750 * When using write() to write data to an output video node the vb2 core 2751 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody 2752 * else is able to provide this information with the write() operation. 2753 */ 2754 bool copy_timestamp = !read && q->copy_timestamp; 2755 unsigned index; 2756 int ret; 2757 2758 dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n", 2759 read ? "read" : "write", (long)*ppos, count, 2760 nonblock ? "non" : ""); 2761 2762 if (!data) 2763 return -EINVAL; 2764 2765 if (q->waiting_in_dqbuf) { 2766 dprintk(q, 3, "another dup()ped fd is %s\n", 2767 read ? "reading" : "writing"); 2768 return -EBUSY; 2769 } 2770 2771 /* 2772 * Initialize emulator on first call. 2773 */ 2774 if (!vb2_fileio_is_active(q)) { 2775 ret = __vb2_init_fileio(q, read); 2776 dprintk(q, 3, "vb2_init_fileio result: %d\n", ret); 2777 if (ret) 2778 return ret; 2779 } 2780 fileio = q->fileio; 2781 2782 /* 2783 * Check if we need to dequeue the buffer. 2784 */ 2785 index = fileio->cur_index; 2786 if (index >= q->num_buffers) { 2787 struct vb2_buffer *b; 2788 2789 /* 2790 * Call vb2_dqbuf to get buffer back. 2791 */ 2792 ret = vb2_core_dqbuf(q, &index, NULL, nonblock); 2793 dprintk(q, 5, "vb2_dqbuf result: %d\n", ret); 2794 if (ret) 2795 return ret; 2796 fileio->dq_count += 1; 2797 2798 fileio->cur_index = index; 2799 buf = &fileio->bufs[index]; 2800 b = q->bufs[index]; 2801 2802 /* 2803 * Get number of bytes filled by the driver 2804 */ 2805 buf->pos = 0; 2806 buf->queued = 0; 2807 buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0) 2808 : vb2_plane_size(q->bufs[index], 0); 2809 /* Compensate for data_offset on read in the multiplanar case. */ 2810 if (is_multiplanar && read && 2811 b->planes[0].data_offset < buf->size) { 2812 buf->pos = b->planes[0].data_offset; 2813 buf->size -= buf->pos; 2814 } 2815 } else { 2816 buf = &fileio->bufs[index]; 2817 } 2818 2819 /* 2820 * Limit count on last few bytes of the buffer. 2821 */ 2822 if (buf->pos + count > buf->size) { 2823 count = buf->size - buf->pos; 2824 dprintk(q, 5, "reducing read count: %zd\n", count); 2825 } 2826 2827 /* 2828 * Transfer data to userspace. 2829 */ 2830 dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n", 2831 count, index, buf->pos); 2832 if (read) 2833 ret = copy_to_user(data, buf->vaddr + buf->pos, count); 2834 else 2835 ret = copy_from_user(buf->vaddr + buf->pos, data, count); 2836 if (ret) { 2837 dprintk(q, 3, "error copying data\n"); 2838 return -EFAULT; 2839 } 2840 2841 /* 2842 * Update counters. 2843 */ 2844 buf->pos += count; 2845 *ppos += count; 2846 2847 /* 2848 * Queue next buffer if required. 2849 */ 2850 if (buf->pos == buf->size || (!read && fileio->write_immediately)) { 2851 struct vb2_buffer *b = q->bufs[index]; 2852 2853 /* 2854 * Check if this is the last buffer to read. 2855 */ 2856 if (read && fileio->read_once && fileio->dq_count == 1) { 2857 dprintk(q, 3, "read limit reached\n"); 2858 return __vb2_cleanup_fileio(q); 2859 } 2860 2861 /* 2862 * Call vb2_qbuf and give buffer to the driver. 2863 */ 2864 b->planes[0].bytesused = buf->pos; 2865 2866 if (copy_timestamp) 2867 b->timestamp = ktime_get_ns(); 2868 ret = vb2_core_qbuf(q, index, NULL, NULL); 2869 dprintk(q, 5, "vb2_dbuf result: %d\n", ret); 2870 if (ret) 2871 return ret; 2872 2873 /* 2874 * Buffer has been queued, update the status 2875 */ 2876 buf->pos = 0; 2877 buf->queued = 1; 2878 buf->size = vb2_plane_size(q->bufs[index], 0); 2879 fileio->q_count += 1; 2880 /* 2881 * If we are queuing up buffers for the first time, then 2882 * increase initial_index by one. 2883 */ 2884 if (fileio->initial_index < q->num_buffers) 2885 fileio->initial_index++; 2886 /* 2887 * The next buffer to use is either a buffer that's going to be 2888 * queued for the first time (initial_index < q->num_buffers) 2889 * or it is equal to q->num_buffers, meaning that the next 2890 * time we need to dequeue a buffer since we've now queued up 2891 * all the 'first time' buffers. 2892 */ 2893 fileio->cur_index = fileio->initial_index; 2894 } 2895 2896 /* 2897 * Return proper number of bytes processed. 2898 */ 2899 if (ret == 0) 2900 ret = count; 2901 return ret; 2902 } 2903 2904 size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, 2905 loff_t *ppos, int nonblocking) 2906 { 2907 return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1); 2908 } 2909 EXPORT_SYMBOL_GPL(vb2_read); 2910 2911 size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, 2912 loff_t *ppos, int nonblocking) 2913 { 2914 return __vb2_perform_fileio(q, (char __user *) data, count, 2915 ppos, nonblocking, 0); 2916 } 2917 EXPORT_SYMBOL_GPL(vb2_write); 2918 2919 struct vb2_threadio_data { 2920 struct task_struct *thread; 2921 vb2_thread_fnc fnc; 2922 void *priv; 2923 bool stop; 2924 }; 2925 2926 static int vb2_thread(void *data) 2927 { 2928 struct vb2_queue *q = data; 2929 struct vb2_threadio_data *threadio = q->threadio; 2930 bool copy_timestamp = false; 2931 unsigned prequeue = 0; 2932 unsigned index = 0; 2933 int ret = 0; 2934 2935 if (q->is_output) { 2936 prequeue = q->num_buffers; 2937 copy_timestamp = q->copy_timestamp; 2938 } 2939 2940 set_freezable(); 2941 2942 for (;;) { 2943 struct vb2_buffer *vb; 2944 2945 /* 2946 * Call vb2_dqbuf to get buffer back. 2947 */ 2948 if (prequeue) { 2949 vb = q->bufs[index++]; 2950 prequeue--; 2951 } else { 2952 call_void_qop(q, wait_finish, q); 2953 if (!threadio->stop) 2954 ret = vb2_core_dqbuf(q, &index, NULL, 0); 2955 call_void_qop(q, wait_prepare, q); 2956 dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret); 2957 if (!ret) 2958 vb = q->bufs[index]; 2959 } 2960 if (ret || threadio->stop) 2961 break; 2962 try_to_freeze(); 2963 2964 if (vb->state != VB2_BUF_STATE_ERROR) 2965 if (threadio->fnc(vb, threadio->priv)) 2966 break; 2967 call_void_qop(q, wait_finish, q); 2968 if (copy_timestamp) 2969 vb->timestamp = ktime_get_ns(); 2970 if (!threadio->stop) 2971 ret = vb2_core_qbuf(q, vb->index, NULL, NULL); 2972 call_void_qop(q, wait_prepare, q); 2973 if (ret || threadio->stop) 2974 break; 2975 } 2976 2977 /* Hmm, linux becomes *very* unhappy without this ... */ 2978 while (!kthread_should_stop()) { 2979 set_current_state(TASK_INTERRUPTIBLE); 2980 schedule(); 2981 } 2982 return 0; 2983 } 2984 2985 /* 2986 * This function should not be used for anything else but the videobuf2-dvb 2987 * support. If you think you have another good use-case for this, then please 2988 * contact the linux-media mailinglist first. 2989 */ 2990 int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, 2991 const char *thread_name) 2992 { 2993 struct vb2_threadio_data *threadio; 2994 int ret = 0; 2995 2996 if (q->threadio) 2997 return -EBUSY; 2998 if (vb2_is_busy(q)) 2999 return -EBUSY; 3000 if (WARN_ON(q->fileio)) 3001 return -EBUSY; 3002 3003 threadio = kzalloc(sizeof(*threadio), GFP_KERNEL); 3004 if (threadio == NULL) 3005 return -ENOMEM; 3006 threadio->fnc = fnc; 3007 threadio->priv = priv; 3008 3009 ret = __vb2_init_fileio(q, !q->is_output); 3010 dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret); 3011 if (ret) 3012 goto nomem; 3013 q->threadio = threadio; 3014 threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name); 3015 if (IS_ERR(threadio->thread)) { 3016 ret = PTR_ERR(threadio->thread); 3017 threadio->thread = NULL; 3018 goto nothread; 3019 } 3020 return 0; 3021 3022 nothread: 3023 __vb2_cleanup_fileio(q); 3024 nomem: 3025 kfree(threadio); 3026 return ret; 3027 } 3028 EXPORT_SYMBOL_GPL(vb2_thread_start); 3029 3030 int vb2_thread_stop(struct vb2_queue *q) 3031 { 3032 struct vb2_threadio_data *threadio = q->threadio; 3033 int err; 3034 3035 if (threadio == NULL) 3036 return 0; 3037 threadio->stop = true; 3038 /* Wake up all pending sleeps in the thread */ 3039 vb2_queue_error(q); 3040 err = kthread_stop(threadio->thread); 3041 __vb2_cleanup_fileio(q); 3042 threadio->thread = NULL; 3043 kfree(threadio); 3044 q->threadio = NULL; 3045 return err; 3046 } 3047 EXPORT_SYMBOL_GPL(vb2_thread_stop); 3048 3049 MODULE_DESCRIPTION("Media buffer core framework"); 3050 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski"); 3051 MODULE_LICENSE("GPL"); 3052 MODULE_IMPORT_NS(DMA_BUF); 3053