// SPDX-License-Identifier: GPL-2.0-only
/*
 * generic helper functions for handling video4linux capture buffers
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
 * (c) 2006 Ted Walther and John Sokol
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <media/videobuf-core.h>

#define MAGIC_BUFFER 0x20070728
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			printk(KERN_ERR					\
				"magic mismatch: %x (expected %x)\n",	\
				is, should);				\
			BUG();						\
		}							\
	} while (0)

static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vbuf: " fmt, ## arg);	\
	} while (0)

/* --------------------------------------------------------------------- */

#define CALL(q, f, arg...)						\
	((q->int_ops->f) ? q->int_ops->f(arg) : 0)
#define CALLPTR(q, f, arg...)						\
	((q->int_ops->f) ? q->int_ops->f(arg) : NULL)

struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
{
	struct videobuf_buffer *vb;

	BUG_ON(q->msize < sizeof(*vb));

	if (!q->int_ops || !q->int_ops->alloc_vb) {
		printk(KERN_ERR "No specific ops defined!\n");
		BUG();
	}

	vb = q->int_ops->alloc_vb(q->msize);
	if (NULL != vb) {
		init_waitqueue_head(&vb->done);
		vb->magic = MAGIC_BUFFER;
	}

	return vb;
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);

static int state_neither_active_nor_queued(struct videobuf_queue *q,
					   struct videobuf_buffer *vb)
{
	unsigned long flags;
	bool rc;

	spin_lock_irqsave(q->irqlock, flags);
	rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
	spin_unlock_irqrestore(q->irqlock, flags);
	return rc;
}
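
/*
 * videobuf_waiton() - wait for a buffer to leave the QUEUED/ACTIVE states
 *
 * When @non_blocking is set, returns 0 if the buffer is already done and
 * -EAGAIN if it is still queued or active.  Otherwise the caller sleeps
 * (interruptibly when @intr is set) until the driver completes the buffer;
 * q->ext_lock, if held, is dropped across the sleep so other file
 * operations on the device are not blocked.
 */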
int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
		int non_blocking, int intr)
{
	bool is_ext_locked;
	int ret = 0;

	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);

	if (non_blocking) {
		if (state_neither_active_nor_queued(q, vb))
			return 0;
		return -EAGAIN;
	}

	is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);

	/* Release vdev lock to prevent this wait from blocking outside access to
	   the device. */
	if (is_ext_locked)
		mutex_unlock(q->ext_lock);
	if (intr)
		ret = wait_event_interruptible(vb->done,
				state_neither_active_nor_queued(q, vb));
	else
		wait_event(vb->done, state_neither_active_nor_queued(q, vb));
	/* Relock */
	if (is_ext_locked)
		mutex_lock(q->ext_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);

int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
		    struct v4l2_framebuffer *fbuf)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	return CALL(q, iolock, q, vb, fbuf);
}
EXPORT_SYMBOL_GPL(videobuf_iolock);

void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	if (q->int_ops->vaddr)
		return q->int_ops->vaddr(buf);
	return NULL;
}
EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);

/* --------------------------------------------------------------------- */

void videobuf_queue_core_init(struct videobuf_queue *q,
			 const struct videobuf_queue_ops *ops,
			 struct device *dev,
			 spinlock_t *irqlock,
			 enum v4l2_buf_type type,
			 enum v4l2_field field,
			 unsigned int msize,
			 void *priv,
			 struct videobuf_qtype_ops *int_ops,
			 struct mutex *ext_lock)
{
	BUG_ON(!q);
	memset(q, 0, sizeof(*q));
	q->irqlock   = irqlock;
	q->ext_lock  = ext_lock;
	q->dev       = dev;
	q->type      = type;
	q->field     = field;
	q->msize     = msize;
	q->ops       = ops;
	q->priv_data = priv;
	q->int_ops   = int_ops;

	/* All buffer operations are mandatory */
	BUG_ON(!q->ops->buf_setup);
	BUG_ON(!q->ops->buf_prepare);
	BUG_ON(!q->ops->buf_queue);
	BUG_ON(!q->ops->buf_release);

	/* Lock is mandatory for queue_cancel to work */
	BUG_ON(!irqlock);

	/* Having implementations for the abstract methods is mandatory */
	BUG_ON(!q->int_ops);

	mutex_init(&q->vb_lock);
	init_waitqueue_head(&q->wait);
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
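
/*
 * videobuf_queue_is_busy() - check whether the queue has buffers in use
 *
 * Returns non-zero if streaming or read I/O is in progress, a read buffer
 * is pending, or any allocated buffer is still mapped, queued or active;
 * returns zero when all buffers are idle.
 */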
/* Locking: Only user is bttv and that usage is unsafe; find a way to remove it */
int videobuf_queue_is_busy(struct videobuf_queue *q)
{
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (q->streaming) {
		dprintk(1, "busy: streaming active\n");
		return 1;
	}
	if (q->reading) {
		dprintk(1, "busy: pending read #1\n");
		return 1;
	}
	if (q->read_buf) {
		dprintk(1, "busy: pending read #2\n");
		return 1;
	}
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->map) {
			dprintk(1, "busy: buffer #%d mapped\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			dprintk(1, "busy: buffer #%d queued\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
			dprintk(1, "busy: buffer #%d active\n", i);
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);

/*
 * __videobuf_free() - free all the buffers and their control structures
 *
 * This function can only be called if streaming/reading is off, i.e. no buffers
 * are under control of the driver.
 */
/* Locking: Caller holds q->vb_lock */
static int __videobuf_free(struct videobuf_queue *q)
{
	int i;

	dprintk(1, "%s\n", __func__);
	if (!q)
		return 0;

	if (q->streaming || q->reading) {
		dprintk(1, "Cannot free buffers when streaming or reading\n");
		return -EBUSY;
	}

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		if (q->bufs[i] && q->bufs[i]->map) {
			dprintk(1, "Cannot free mmapped buffers\n");
			return -EBUSY;
		}

	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}

	return 0;
}

/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
	unsigned long flags = 0;
	int i;

	q->streaming = 0;
	q->reading = 0;
	wake_up_interruptible_sync(&q->wait);

	/* remove queued buffers from list */
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			list_del(&q->bufs[i]->queue);
			q->bufs[i]->state = VIDEOBUF_ERROR;
			wake_up_all(&q->bufs[i]->done);
		}
	}
	spin_unlock_irqrestore(q->irqlock, flags);

	/* free all buffers + clear queue */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
	}
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);

/* --------------------------------------------------------------------- */

/* Locking: Caller holds q->vb_lock */
enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
{
	enum v4l2_field field = q->field;

	BUG_ON(V4L2_FIELD_ANY == field);

	if (V4L2_FIELD_ALTERNATE == field) {
		if (V4L2_FIELD_TOP == q->last) {
			field = V4L2_FIELD_BOTTOM;
			q->last = V4L2_FIELD_BOTTOM;
		} else {
			field = V4L2_FIELD_TOP;
			q->last = V4L2_FIELD_TOP;
		}
	}
	return field;
}
EXPORT_SYMBOL_GPL(videobuf_next_field);

/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
			    struct videobuf_buffer *vb, enum v4l2_buf_type type)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	b->index  = vb->i;
	b->type   = type;

	b->memory = vb->memory;
	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		b->m.offset  = vb->boff;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_USERPTR:
		b->m.userptr = vb->baddr;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_OVERLAY:
		b->m.offset  = vb->boff;
		break;
	case V4L2_MEMORY_DMABUF:
		/* DMABUF is not handled in videobuf framework */
		break;
	}

	b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	if (vb->map)
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (vb->state) {
	case VIDEOBUF_PREPARED:
	case VIDEOBUF_QUEUED:
	case VIDEOBUF_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VIDEOBUF_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VIDEOBUF_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VIDEOBUF_NEEDS_INIT:
	case VIDEOBUF_IDLE:
		/* nothing */
		break;
	}

	b->field     = vb->field;
	b->timestamp = ns_to_timeval(vb->ts);
	b->bytesused = vb->size;
	b->sequence  = vb->field_count >> 1;
}
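
/*
 * videobuf_mmap_free() - release all buffers allocated for mmap
 *
 * Takes vb_lock and frees every buffer together with its control structure.
 * Fails with -EBUSY while streaming/reading is active or while any buffer
 * is still mapped into userspace.
 */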
int videobuf_mmap_free(struct videobuf_queue *q)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_free(q);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);

/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	unsigned int i;
	int err;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	err = __videobuf_free(q);
	if (0 != err)
		return err;

	/* Allocate and initialize buffers */
	for (i = 0; i < bcount; i++) {
		q->bufs[i] = videobuf_alloc_vb(q);

		if (NULL == q->bufs[i])
			break;

		q->bufs[i]->i      = i;
		q->bufs[i]->memory = memory;
		q->bufs[i]->bsize  = bsize;
		switch (memory) {
		case V4L2_MEMORY_MMAP:
			q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
			break;
		case V4L2_MEMORY_USERPTR:
		case V4L2_MEMORY_OVERLAY:
		case V4L2_MEMORY_DMABUF:
			/* nothing */
			break;
		}
	}

	if (!i)
		return -ENOMEM;

	dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);

	return i;
}
EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);

int videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);

int videobuf_reqbufs(struct videobuf_queue *q,
		 struct v4l2_requestbuffers *req)
{
	unsigned int size, count;
	int retval;

	if (req->memory != V4L2_MEMORY_MMAP &&
	    req->memory != V4L2_MEMORY_USERPTR &&
	    req->memory != V4L2_MEMORY_OVERLAY) {
		dprintk(1, "reqbufs: memory type invalid\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	if (req->type != q->type) {
		dprintk(1, "reqbufs: queue type invalid\n");
		retval = -EINVAL;
		goto done;
	}

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming already exists\n");
		retval = -EBUSY;
		goto done;
	}
	if (!list_empty(&q->stream)) {
		dprintk(1, "reqbufs: stream running\n");
		retval = -EBUSY;
		goto done;
	}

	if (req->count == 0) {
		dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
		retval = __videobuf_free(q);
		goto done;
	}

	count = req->count;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = 0;
	q->ops->buf_setup(q, &count, &size);
	dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
		count, size,
		(unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));

	retval = __videobuf_mmap_setup(q, count, size, req->memory);
	if (retval < 0) {
		dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
		goto done;
	}

	req->count = retval;
	retval = 0;

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);

int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	int ret = -EINVAL;

	videobuf_queue_lock(q);
	if (unlikely(b->type != q->type)) {
		dprintk(1, "querybuf: Wrong type.\n");
		goto done;
	}
	if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
		dprintk(1, "querybuf: index out of range.\n");
		goto done;
	}
	if (unlikely(NULL == q->bufs[b->index])) {
		dprintk(1, "querybuf: buffer is null.\n");
		goto done;
	}

	videobuf_status(q, b, q->bufs[b->index], q->type);

	ret = 0;
done:
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);
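
/*
 * videobuf_qbuf() - queue a buffer, implementing the VIDIOC_QBUF semantics
 *
 * Validates the v4l2_buffer against the queue, lets the driver prepare the
 * buffer via buf_prepare(), adds it to the stream list and, if streaming is
 * already on, hands it to the driver with buf_queue() under irqlock.
 */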
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (b->memory == V4L2_MEMORY_MMAP)
		down_read(&current->mm->mmap_sem);

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1, "qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1, "qbuf: Wrong type.\n");
		goto done;
	}
	if (b->index >= VIDEO_MAX_FRAME) {
		dprintk(1, "qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1, "qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1, "qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
		dprintk(1, "qbuf: buffer is already queued or active.\n");
		goto done;
	}

	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
			goto done;
		}
		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
		    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
			buf->size = b->bytesused;
			buf->field = b->field;
			buf->ts = v4l2_timeval_to_ns(&b->timestamp);
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1, "qbuf: buffer length is not enough\n");
			goto done;
		}
		if (VIDEOBUF_NEEDS_INIT != buf->state &&
		    buf->baddr != b->m.userptr)
			q->ops->buf_release(q, buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1, "qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1, "qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, buf, field);
	if (0 != retval) {
		dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
		goto done;
	}

	list_add_tail(&buf->stream, &q->stream);
	if (q->streaming) {
		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, buf);
		spin_unlock_irqrestore(q->irqlock, flags);
	}
	dprintk(1, "qbuf: succeeded\n");
	retval = 0;
	wake_up_interruptible_sync(&q->wait);

done:
	videobuf_queue_unlock(q);

	if (b->memory == V4L2_MEMORY_MMAP)
		up_read(&current->mm->mmap_sem);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_qbuf);
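
/*
 * stream_next_buffer_check_queue() - wait until a buffer can be dequeued
 *
 * With @noblock set, returns -EAGAIN when the stream list is empty.
 * Otherwise vb_lock is dropped while sleeping on q->wait, and the streaming
 * and list state are re-checked under the lock before returning.
 */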
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
{
	int retval;

checks:
	if (!q->streaming) {
		dprintk(1, "next_buffer: Not streaming\n");
		retval = -EINVAL;
		goto done;
	}

	if (list_empty(&q->stream)) {
		if (noblock) {
			retval = -EAGAIN;
			dprintk(2, "next_buffer: no buffers to dequeue\n");
			goto done;
		} else {
			dprintk(2, "next_buffer: waiting on buffer\n");

			/* Drop lock to avoid deadlock with qbuf */
			videobuf_queue_unlock(q);

			/* Checking list_empty and streaming is safe without
			 * locks because we goto checks to validate while
			 * holding locks before proceeding */
			retval = wait_event_interruptible(q->wait,
				!list_empty(&q->stream) || !q->streaming);
			videobuf_queue_lock(q);

			if (retval)
				goto done;

			goto checks;
		}
	}

	retval = 0;

done:
	return retval;
}

/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
			      struct videobuf_buffer **vb, int nonblocking)
{
	int retval;
	struct videobuf_buffer *buf = NULL;

	retval = stream_next_buffer_check_queue(q, nonblocking);
	if (retval)
		goto done;

	buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
	retval = videobuf_waiton(q, buf, nonblocking, 1);
	if (retval < 0)
		goto done;

	*vb = buf;
done:
	return retval;
}

int videobuf_dqbuf(struct videobuf_queue *q,
		   struct v4l2_buffer *b, int nonblocking)
{
	struct videobuf_buffer *buf = NULL;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(b, 0, sizeof(*b));
	videobuf_queue_lock(q);

	retval = stream_next_buffer(q, &buf, nonblocking);
	if (retval < 0) {
		dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
		goto done;
	}

	switch (buf->state) {
	case VIDEOBUF_ERROR:
		dprintk(1, "dqbuf: state is error\n");
		break;
	case VIDEOBUF_DONE:
		dprintk(1, "dqbuf: state is done\n");
		break;
	default:
		dprintk(1, "dqbuf: state invalid\n");
		retval = -EINVAL;
		goto done;
	}
	CALL(q, sync, q, buf);
	videobuf_status(q, b, buf, q->type);
	list_del(&buf->stream);
	buf->state = VIDEOBUF_IDLE;
	b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);

int videobuf_streamon(struct videobuf_queue *q)
{
	struct videobuf_buffer *buf;
	unsigned long flags = 0;
	int retval;

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading)
		goto done;
	retval = 0;
	if (q->streaming)
		goto done;
	q->streaming = 1;
	spin_lock_irqsave(q->irqlock, flags);
	list_for_each_entry(buf, &q->stream, stream)
		if (buf->state == VIDEOBUF_PREPARED)
			q->ops->buf_queue(q, buf);
	spin_unlock_irqrestore(q->irqlock, flags);

	wake_up_interruptible_sync(&q->wait);
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);

/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
{
	if (!q->streaming)
		return -EINVAL;

	videobuf_queue_cancel(q);

	return 0;
}

int videobuf_streamoff(struct videobuf_queue *q)
{
	int retval;

	videobuf_queue_lock(q);
	retval = __videobuf_streamoff(q);
	videobuf_queue_unlock(q);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamoff);
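
/*
 * videobuf_read_zerocopy() - read() fast path without a bounce buffer
 *
 * Wraps the user buffer in a temporary USERPTR videobuf_buffer, queues it,
 * waits for the capture to complete and returns the number of bytes
 * captured, or a negative errno.
 */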
/* Locking: Caller holds q->vb_lock */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
				      char __user *data,
				      size_t count, loff_t *ppos)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* setup stuff */
	q->read_buf = videobuf_alloc_vb(q);
	if (NULL == q->read_buf)
		return -ENOMEM;

	q->read_buf->memory = V4L2_MEMORY_USERPTR;
	q->read_buf->baddr  = (unsigned long)data;
	q->read_buf->bsize  = count;

	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, q->read_buf, field);
	if (0 != retval)
		goto done;

	/* start capture & wait */
	spin_lock_irqsave(q->irqlock, flags);
	q->ops->buf_queue(q, q->read_buf);
	spin_unlock_irqrestore(q->irqlock, flags);
	retval = videobuf_waiton(q, q->read_buf, 0, 0);
	if (0 == retval) {
		CALL(q, sync, q, q->read_buf);
		if (VIDEOBUF_ERROR == q->read_buf->state)
			retval = -EIO;
		else
			retval = q->read_buf->size;
	}

done:
	/* cleanup */
	q->ops->buf_release(q, q->read_buf);
	kfree(q->read_buf);
	q->read_buf = NULL;
	return retval;
}

static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   struct videobuf_buffer *buf,
				   char __user *data, size_t count,
				   int nonblocking)
{
	void *vaddr = CALLPTR(q, vaddr, buf);

	/* copy to userspace */
	if (count > buf->size - q->read_off)
		count = buf->size - q->read_off;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}

static int __videobuf_copy_stream(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc = CALLPTR(q, vaddr, buf);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * to all vbi decoding software out there ... */
		fc += (buf->size >> 2) - 1;
		*fc = buf->field_count >> 1;
		dprintk(1, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}

ssize_t videobuf_read_one(struct videobuf_queue *q,
			  char __user *data, size_t count, loff_t *ppos,
			  int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned size = 0, nbufs = 1;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	videobuf_queue_lock(q);

	q->ops->buf_setup(q, &nbufs, &size);

	if (NULL == q->read_buf &&
	    count >= size &&
	    !nonblocking) {
		retval = videobuf_read_zerocopy(q, data, count, ppos);
		if (retval >= 0 || retval == -EIO)
			/* ok, all done */
			goto done;
		/* fallback to kernel bounce buffer on failures */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc_vb(q);

		dprintk(1, "video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q, q->read_buf, field);

		if (0 != retval) {
			kfree(q->read_buf);
			q->read_buf = NULL;
			goto done;
		}

		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, q->read_buf);
		spin_unlock_irqrestore(q->irqlock, flags);

		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;

	CALL(q, sync, q, q->read_buf);

	if (VIDEOBUF_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* Copy to userspace */
	retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
	if (retval < 0)
		goto done;

	q->read_off += retval;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);
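
/*
 * __videobuf_read_start() - switch the queue into read() streaming mode
 *
 * Allocates between 2 and VIDEO_MAX_FRAME buffers (memory type
 * V4L2_MEMORY_USERPTR) via __videobuf_mmap_setup(), prepares and queues
 * them all to the driver and sets q->reading.
 */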
/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned int count = 0, size = 0;
	int err, i;

	q->ops->buf_setup(q, &count, &size);
	if (count < 2)
		count = 2;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = PAGE_ALIGN(size);

	err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
	if (err < 0)
		return err;

	count = err;

	for (i = 0; i < count; i++) {
		field = videobuf_next_field(q);
		err = q->ops->buf_prepare(q, q->bufs[i], field);
		if (err)
			return err;
		list_add_tail(&q->bufs[i]->stream, &q->stream);
	}
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < count; i++)
		q->ops->buf_queue(q, q->bufs[i]);
	spin_unlock_irqrestore(q->irqlock, flags);
	q->reading = 1;
	return 0;
}

static void __videobuf_read_stop(struct videobuf_queue *q)
{
	int i;

	videobuf_queue_cancel(q);
	__videobuf_free(q);
	INIT_LIST_HEAD(&q->stream);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}
	q->read_buf = NULL;
}

int videobuf_read_start(struct videobuf_queue *q)
{
	int rc;

	videobuf_queue_lock(q);
	rc = __videobuf_read_start(q);
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_read_start);

void videobuf_read_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);
	__videobuf_read_stop(q);
	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);

void videobuf_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);

	if (q->streaming)
		__videobuf_streamoff(q);

	if (q->reading)
		__videobuf_read_stop(q);

	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);
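
/*
 * videobuf_read_stream() - read() helper that cycles through all buffers
 *
 * Dequeues completed buffers one at a time, copies their payload to
 * userspace and requeues each buffer once it has been fully consumed.
 * With @vbihack set, the frame counter is stored in the last four bytes
 * of every buffer for legacy VBI applications.
 */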
ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags = 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	dprintk(2, "%s\n", __func__);
	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
		if (rc < 0) {
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == VIDEOBUF_DONE) {
			rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
						    retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval      += rc;
			count       -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			spin_lock_irqsave(q->irqlock, flags);
			q->ops->buf_queue(q, q->read_buf);
			spin_unlock_irqrestore(q->irqlock, flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);

__poll_t videobuf_poll_stream(struct file *file,
			      struct videobuf_queue *q,
			      poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct videobuf_buffer *buf = NULL;
	__poll_t rc = 0;

	videobuf_queue_lock(q);
	if (q->streaming) {
		if (!list_empty(&q->stream))
			buf = list_entry(q->stream.next,
					 struct videobuf_buffer, stream);
	} else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
		if (!q->reading)
			__videobuf_read_start(q);
		if (!q->reading) {
			rc = EPOLLERR;
		} else if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		buf = q->read_buf;
	}
	/* only register for the wakeup once we actually have a buffer */
	if (buf)
		poll_wait(file, &buf->done, wait);
	else
		rc = EPOLLERR;

	if (0 == rc) {
		if (buf->state == VIDEOBUF_DONE ||
		    buf->state == VIDEOBUF_ERROR) {
			switch (q->type) {
			case V4L2_BUF_TYPE_VIDEO_OUTPUT:
			case V4L2_BUF_TYPE_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SDR_OUTPUT:
				rc = EPOLLOUT | EPOLLWRNORM;
				break;
			default:
				rc = EPOLLIN | EPOLLRDNORM;
				break;
			}
		}
	}
	videobuf_queue_unlock(q);
	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);

int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
	int rc = -EINVAL;
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		struct videobuf_buffer *buf = q->bufs[i];

		if (buf && buf->memory == V4L2_MEMORY_MMAP &&
				buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
			rc = CALL(q, mmap_mapper, q, buf, vma);
			break;
		}
	}
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);