/*
 * generic helper functions for handling video4linux capture buffers
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
 * (c) 2006 Ted Walther and John Sokol
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <media/videobuf-core.h>

#define MAGIC_BUFFER 0x20070728
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			printk(KERN_ERR					\
				"magic mismatch: %x (expected %x)\n",	\
				is, should);				\
			BUG();						\
		}							\
	} while (0)

static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vbuf: " fmt, ## arg);	\
	} while (0)

/* --------------------------------------------------------------------- */

#define CALL(q, f, arg...)						\
	((q->int_ops->f) ? q->int_ops->f(arg) : 0)

struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
{
	struct videobuf_buffer *vb;

	BUG_ON(q->msize < sizeof(*vb));

	if (!q->int_ops || !q->int_ops->alloc_vb) {
		printk(KERN_ERR "No specific ops defined!\n");
		BUG();
	}

	vb = q->int_ops->alloc_vb(q->msize);
	if (NULL != vb) {
		init_waitqueue_head(&vb->done);
		vb->magic = MAGIC_BUFFER;
	}

	return vb;
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);

static int state_neither_active_nor_queued(struct videobuf_queue *q,
					   struct videobuf_buffer *vb)
{
	unsigned long flags;
	bool rc;

	spin_lock_irqsave(q->irqlock, flags);
	rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
	spin_unlock_irqrestore(q->irqlock, flags);
	return rc;
}

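/**
 * videobuf_waiton() - wait until a buffer is no longer queued or active
 * @q:            queue the buffer belongs to
 * @vb:           buffer to wait on
 * @non_blocking: if set, do not sleep; return 0 if the buffer is already
 *                done and -EAGAIN if it is still QUEUED or ACTIVE
 * @intr:         if set, sleep interruptibly (may return -ERESTARTSYS)
 *
 * While sleeping, the external serialization lock (q->ext_lock), if held,
 * is dropped so that other accesses to the device are not blocked, and is
 * re-taken before returning.
 */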
int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
		int non_blocking, int intr)
{
	bool is_ext_locked;
	int ret = 0;

	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);

	if (non_blocking) {
		if (state_neither_active_nor_queued(q, vb))
			return 0;
		return -EAGAIN;
	}

	is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);

	/* Release the external (vdev) lock so that this wait does not block
	   other accesses to the device. */
	if (is_ext_locked)
		mutex_unlock(q->ext_lock);
	if (intr)
		ret = wait_event_interruptible(vb->done,
				state_neither_active_nor_queued(q, vb));
	else
		wait_event(vb->done, state_neither_active_nor_queued(q, vb));
	/* Relock */
	if (is_ext_locked)
		mutex_lock(q->ext_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);

int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
		    struct v4l2_framebuffer *fbuf)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	return CALL(q, iolock, q, vb, fbuf);
}
EXPORT_SYMBOL_GPL(videobuf_iolock);

void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	if (q->int_ops->vaddr)
		return q->int_ops->vaddr(buf);
	return NULL;
}
EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);

/* --------------------------------------------------------------------- */

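/**
 * videobuf_queue_core_init() - initialize a videobuf queue
 * @q:        queue to initialize (fully overwritten by this call)
 * @ops:      driver buffer operations; buf_setup, buf_prepare, buf_queue
 *            and buf_release are all mandatory
 * @dev:      device the queue belongs to
 * @irqlock:  driver spinlock protecting the buffer lists (mandatory)
 * @type:     V4L2 buffer type handled by this queue
 * @field:    default field order
 * @msize:    size of the per-buffer structure allocated by the backend;
 *            must be at least sizeof(struct videobuf_buffer)
 * @priv:     driver private data pointer
 * @int_ops:  memory-handling backend operations (mandatory)
 * @ext_lock: optional external serialization lock (may be NULL)
 *
 * This is typically called by a memory-type backend's own init helper
 * rather than directly by drivers.
 */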
void videobuf_queue_core_init(struct videobuf_queue *q,
			 const struct videobuf_queue_ops *ops,
			 struct device *dev,
			 spinlock_t *irqlock,
			 enum v4l2_buf_type type,
			 enum v4l2_field field,
			 unsigned int msize,
			 void *priv,
			 struct videobuf_qtype_ops *int_ops,
			 struct mutex *ext_lock)
{
	BUG_ON(!q);
	memset(q, 0, sizeof(*q));
	q->irqlock = irqlock;
	q->ext_lock = ext_lock;
	q->dev = dev;
	q->type = type;
	q->field = field;
	q->msize = msize;
	q->ops = ops;
	q->priv_data = priv;
	q->int_ops = int_ops;

	/* All buffer operations are mandatory */
	BUG_ON(!q->ops->buf_setup);
	BUG_ON(!q->ops->buf_prepare);
	BUG_ON(!q->ops->buf_queue);
	BUG_ON(!q->ops->buf_release);

	/* Lock is mandatory for queue_cancel to work */
	BUG_ON(!irqlock);

	/* Having implementations for the abstract methods is mandatory */
	BUG_ON(!q->int_ops);

	mutex_init(&q->vb_lock);
	init_waitqueue_head(&q->wait);
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_core_init);

/* Locking: only used by bttv; unsafe, find a way to remove it. */
int videobuf_queue_is_busy(struct videobuf_queue *q)
{
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (q->streaming) {
		dprintk(1, "busy: streaming active\n");
		return 1;
	}
	if (q->reading) {
		dprintk(1, "busy: pending read #1\n");
		return 1;
	}
	if (q->read_buf) {
		dprintk(1, "busy: pending read #2\n");
		return 1;
	}
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->map) {
			dprintk(1, "busy: buffer #%d mapped\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			dprintk(1, "busy: buffer #%d queued\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
			dprintk(1, "busy: buffer #%d active\n", i);
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);

/**
 * __videobuf_free() - free all the buffers and their control structures
 *
 * This function can only be called if streaming/reading is off, i.e. no buffers
 * are under control of the driver.
 */
/* Locking: Caller holds q->vb_lock */
static int __videobuf_free(struct videobuf_queue *q)
{
	int i;

	dprintk(1, "%s\n", __func__);
	if (!q)
		return 0;

	if (q->streaming || q->reading) {
		dprintk(1, "Cannot free buffers when streaming or reading\n");
		return -EBUSY;
	}

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		if (q->bufs[i] && q->bufs[i]->map) {
			dprintk(1, "Cannot free mmapped buffers\n");
			return -EBUSY;
		}

	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}

	return 0;
}

/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
	unsigned long flags = 0;
	int i;

	q->streaming = 0;
	q->reading = 0;
	wake_up_interruptible_sync(&q->wait);

	/* remove queued buffers from list */
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			list_del(&q->bufs[i]->queue);
			q->bufs[i]->state = VIDEOBUF_ERROR;
			wake_up_all(&q->bufs[i]->done);
		}
	}
	spin_unlock_irqrestore(q->irqlock, flags);

	/* free all buffers + clear queue */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
	}
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);

/* --------------------------------------------------------------------- */

/* Locking: Caller holds q->vb_lock */
enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
{
	enum v4l2_field field = q->field;

	BUG_ON(V4L2_FIELD_ANY == field);

	if (V4L2_FIELD_ALTERNATE == field) {
		if (V4L2_FIELD_TOP == q->last) {
			field = V4L2_FIELD_BOTTOM;
			q->last = V4L2_FIELD_BOTTOM;
		} else {
			field = V4L2_FIELD_TOP;
			q->last = V4L2_FIELD_TOP;
		}
	}
	return field;
}
EXPORT_SYMBOL_GPL(videobuf_next_field);

/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
			    struct videobuf_buffer *vb, enum v4l2_buf_type type)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	b->index = vb->i;
	b->type = type;

	b->memory = vb->memory;
	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		b->m.offset = vb->boff;
		b->length = vb->bsize;
		break;
	case V4L2_MEMORY_USERPTR:
		b->m.userptr = vb->baddr;
		b->length = vb->bsize;
		break;
	case V4L2_MEMORY_OVERLAY:
		b->m.offset = vb->boff;
		break;
	}

	b->flags = 0;
	if (vb->map)
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (vb->state) {
	case VIDEOBUF_PREPARED:
	case VIDEOBUF_QUEUED:
	case VIDEOBUF_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VIDEOBUF_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VIDEOBUF_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VIDEOBUF_NEEDS_INIT:
	case VIDEOBUF_IDLE:
		/* nothing */
		break;
	}

	b->field = vb->field;
	b->timestamp = vb->ts;
	b->bytesused = vb->size;
	b->sequence = vb->field_count >> 1;
}

int videobuf_mmap_free(struct videobuf_queue *q)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_free(q);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);

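/**
 * __videobuf_mmap_setup() - allocate and initialize @bcount buffers of
 * @bsize bytes each for the given memory type
 *
 * Any previously allocated buffers are freed first.  For V4L2_MEMORY_MMAP
 * each buffer gets the fixed offset boff = PAGE_ALIGN(bsize) * i, which
 * videobuf_mmap_mapper() later matches against vma->vm_pgoff << PAGE_SHIFT
 * to find the buffer being mapped.
 *
 * Returns the number of buffers allocated, -ENOMEM if none could be
 * allocated, or the error from __videobuf_free() if the old buffers
 * could not be released.
 */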
/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	unsigned int i;
	int err;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	err = __videobuf_free(q);
	if (0 != err)
		return err;

	/* Allocate and initialize buffers */
	for (i = 0; i < bcount; i++) {
		q->bufs[i] = videobuf_alloc_vb(q);

		if (NULL == q->bufs[i])
			break;

		q->bufs[i]->i = i;
		q->bufs[i]->memory = memory;
		q->bufs[i]->bsize = bsize;
		switch (memory) {
		case V4L2_MEMORY_MMAP:
			q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
			break;
		case V4L2_MEMORY_USERPTR:
		case V4L2_MEMORY_OVERLAY:
			/* nothing */
			break;
		}
	}

	if (!i)
		return -ENOMEM;

	dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);

	return i;
}
EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);

int videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);

int videobuf_reqbufs(struct videobuf_queue *q,
		 struct v4l2_requestbuffers *req)
{
	unsigned int size, count;
	int retval;

	if (req->count < 1) {
		dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
		return -EINVAL;
	}

	if (req->memory != V4L2_MEMORY_MMAP &&
	    req->memory != V4L2_MEMORY_USERPTR &&
	    req->memory != V4L2_MEMORY_OVERLAY) {
		dprintk(1, "reqbufs: memory type invalid\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	if (req->type != q->type) {
		dprintk(1, "reqbufs: queue type invalid\n");
		retval = -EINVAL;
		goto done;
	}

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming already exists\n");
		retval = -EBUSY;
		goto done;
	}
	if (!list_empty(&q->stream)) {
		dprintk(1, "reqbufs: stream running\n");
		retval = -EBUSY;
		goto done;
	}

	count = req->count;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = 0;
	q->ops->buf_setup(q, &count, &size);
	dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
		count, size,
		(unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));

	retval = __videobuf_mmap_setup(q, count, size, req->memory);
	if (retval < 0) {
		dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
		goto done;
	}

	req->count = retval;
	retval = 0;

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);

int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	int ret = -EINVAL;

	videobuf_queue_lock(q);
	if (unlikely(b->type != q->type)) {
		dprintk(1, "querybuf: Wrong type.\n");
		goto done;
	}
	if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
		dprintk(1, "querybuf: index out of range.\n");
		goto done;
	}
	if (unlikely(NULL == q->bufs[b->index])) {
		dprintk(1, "querybuf: buffer is null.\n");
		goto done;
	}

	videobuf_status(q, b, q->bufs[b->index], q->type);

	ret = 0;
done:
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);

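/**
 * videobuf_qbuf() - queue a buffer for I/O (VIDIOC_QBUF helper)
 * @q: queue to operate on
 * @b: buffer descriptor passed in by userspace
 *
 * Validates the buffer, calls the driver's buf_prepare() and adds the
 * buffer to the stream list; if streaming is already on, the buffer is
 * handed to the driver via buf_queue() right away.  Note that for
 * V4L2_MEMORY_MMAP buffers current->mm->mmap_sem is taken for reading
 * before q->vb_lock.
 */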
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (b->memory == V4L2_MEMORY_MMAP)
		down_read(&current->mm->mmap_sem);

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1, "qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1, "qbuf: Wrong type.\n");
		goto done;
	}
	if (b->index >= VIDEO_MAX_FRAME) {
		dprintk(1, "qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1, "qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1, "qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
		dprintk(1, "qbuf: buffer is already queued or active.\n");
		goto done;
	}

	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
			goto done;
		}
		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
		    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
			buf->size = b->bytesused;
			buf->field = b->field;
			buf->ts = b->timestamp;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1, "qbuf: buffer length is not enough\n");
			goto done;
		}
		if (VIDEOBUF_NEEDS_INIT != buf->state &&
		    buf->baddr != b->m.userptr)
			q->ops->buf_release(q, buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1, "qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1, "qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, buf, field);
	if (0 != retval) {
		dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
		goto done;
	}

	list_add_tail(&buf->stream, &q->stream);
	if (q->streaming) {
		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, buf);
		spin_unlock_irqrestore(q->irqlock, flags);
	}
	dprintk(1, "qbuf: succeeded\n");
	retval = 0;
	wake_up_interruptible_sync(&q->wait);

done:
	videobuf_queue_unlock(q);

	if (b->memory == V4L2_MEMORY_MMAP)
		up_read(&current->mm->mmap_sem);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_qbuf);

/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
{
	int retval;

checks:
	if (!q->streaming) {
		dprintk(1, "next_buffer: Not streaming\n");
		retval = -EINVAL;
		goto done;
	}

	if (list_empty(&q->stream)) {
		if (noblock) {
			retval = -EAGAIN;
			dprintk(2, "next_buffer: no buffers to dequeue\n");
			goto done;
		} else {
			dprintk(2, "next_buffer: waiting on buffer\n");

			/* Drop lock to avoid deadlock with qbuf */
			videobuf_queue_unlock(q);

			/* Checking list_empty and streaming is safe without
			 * locks because we goto checks to validate while
			 * holding locks before proceeding */
			retval = wait_event_interruptible(q->wait,
				!list_empty(&q->stream) || !q->streaming);
			videobuf_queue_lock(q);

			if (retval)
				goto done;

			goto checks;
		}
	}

	retval = 0;

done:
	return retval;
}

/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
		struct videobuf_buffer **vb, int nonblocking)
{
	int retval;
	struct videobuf_buffer *buf = NULL;

	retval = stream_next_buffer_check_queue(q, nonblocking);
	if (retval)
		goto done;

	buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
	retval = videobuf_waiton(q, buf, nonblocking, 1);
	if (retval < 0)
		goto done;

	*vb = buf;
done:
	return retval;
}

int videobuf_dqbuf(struct videobuf_queue *q,
		   struct v4l2_buffer *b, int nonblocking)
{
	struct videobuf_buffer *buf = NULL;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(b, 0, sizeof(*b));
	videobuf_queue_lock(q);

	retval = stream_next_buffer(q, &buf, nonblocking);
	if (retval < 0) {
		dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
		goto done;
	}

	switch (buf->state) {
	case VIDEOBUF_ERROR:
		dprintk(1, "dqbuf: state is error\n");
		break;
	case VIDEOBUF_DONE:
		dprintk(1, "dqbuf: state is done\n");
		break;
	default:
		dprintk(1, "dqbuf: state invalid\n");
		retval = -EINVAL;
		goto done;
	}
	CALL(q, sync, q, buf);
	videobuf_status(q, b, buf, q->type);
	list_del(&buf->stream);
	buf->state = VIDEOBUF_IDLE;
	b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);

int videobuf_streamon(struct videobuf_queue *q)
{
	struct videobuf_buffer *buf;
	unsigned long flags = 0;
	int retval;

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading)
		goto done;
	retval = 0;
	if (q->streaming)
		goto done;
	q->streaming = 1;
	spin_lock_irqsave(q->irqlock, flags);
	list_for_each_entry(buf, &q->stream, stream)
		if (buf->state == VIDEOBUF_PREPARED)
			q->ops->buf_queue(q, buf);
	spin_unlock_irqrestore(q->irqlock, flags);

	wake_up_interruptible_sync(&q->wait);
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);

/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
{
	if (!q->streaming)
		return -EINVAL;

	videobuf_queue_cancel(q);

	return 0;
}

int videobuf_streamoff(struct videobuf_queue *q)
{
	int retval;

	videobuf_queue_lock(q);
	retval = __videobuf_streamoff(q);
	videobuf_queue_unlock(q);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamoff);

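/*
 * Sketch of how a driver typically wires the streaming helpers above into
 * its V4L2 ioctl handlers, assuming a hypothetical per-open structure
 * "struct my_fh" that embeds the videobuf queue as "vb_q" (names are
 * illustrative, not part of this API):
 *
 *	static int my_reqbufs(struct file *file, void *priv,
 *			      struct v4l2_requestbuffers *p)
 *	{
 *		struct my_fh *fh = priv;
 *
 *		return videobuf_reqbufs(&fh->vb_q, p);
 *	}
 *
 *	static int my_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
 *	{
 *		struct my_fh *fh = priv;
 *
 *		return videobuf_qbuf(&fh->vb_q, b);
 *	}
 *
 *	static int my_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
 *	{
 *		struct my_fh *fh = priv;
 *
 *		return videobuf_dqbuf(&fh->vb_q, b, file->f_flags & O_NONBLOCK);
 *	}
 *
 *	static int my_streamon(struct file *file, void *priv, enum v4l2_buf_type t)
 *	{
 *		struct my_fh *fh = priv;
 *
 *		return videobuf_streamon(&fh->vb_q);
 *	}
 *
 * videobuf_streamoff() and videobuf_querybuf() are hooked up the same way.
 */
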
/* Locking: Caller holds q->vb_lock */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
				      char __user *data,
				      size_t count, loff_t *ppos)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* setup stuff */
	q->read_buf = videobuf_alloc_vb(q);
	if (NULL == q->read_buf)
		return -ENOMEM;

	q->read_buf->memory = V4L2_MEMORY_USERPTR;
	q->read_buf->baddr = (unsigned long)data;
	q->read_buf->bsize = count;

	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, q->read_buf, field);
	if (0 != retval)
		goto done;

	/* start capture & wait */
	spin_lock_irqsave(q->irqlock, flags);
	q->ops->buf_queue(q, q->read_buf);
	spin_unlock_irqrestore(q->irqlock, flags);
	retval = videobuf_waiton(q, q->read_buf, 0, 0);
	if (0 == retval) {
		CALL(q, sync, q, q->read_buf);
		if (VIDEOBUF_ERROR == q->read_buf->state)
			retval = -EIO;
		else
			retval = q->read_buf->size;
	}

done:
	/* cleanup */
	q->ops->buf_release(q, q->read_buf);
	kfree(q->read_buf);
	q->read_buf = NULL;
	return retval;
}

static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   struct videobuf_buffer *buf,
				   char __user *data, size_t count,
				   int nonblocking)
{
	void *vaddr = CALL(q, vaddr, buf);

	/* copy to userspace */
	if (count > buf->size - q->read_off)
		count = buf->size - q->read_off;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}

static int __videobuf_copy_stream(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc = CALL(q, vaddr, buf);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * to all vbi decoding software out there ... */
		fc += (buf->size >> 2) - 1;
		*fc = buf->field_count >> 1;
		dprintk(1, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}

ssize_t videobuf_read_one(struct videobuf_queue *q,
			  char __user *data, size_t count, loff_t *ppos,
			  int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned size = 0, nbufs = 1;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	videobuf_queue_lock(q);

	q->ops->buf_setup(q, &nbufs, &size);

	if (NULL == q->read_buf &&
	    count >= size &&
	    !nonblocking) {
		retval = videobuf_read_zerocopy(q, data, count, ppos);
		if (retval >= 0 || retval == -EIO)
			/* ok, all done */
			goto done;
		/* fallback to kernel bounce buffer on failures */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc_vb(q);

		dprintk(1, "video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q, q->read_buf, field);

		if (0 != retval) {
			kfree(q->read_buf);
			q->read_buf = NULL;
			goto done;
		}

		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, q->read_buf);
		spin_unlock_irqrestore(q->irqlock, flags);

		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;

	CALL(q, sync, q, q->read_buf);

	if (VIDEOBUF_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* Copy to userspace */
	retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
	if (retval < 0)
		goto done;

	q->read_off += retval;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);

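/**
 * __videobuf_read_start() - switch the queue to read() streaming
 *
 * Allocates between 2 and VIDEO_MAX_FRAME USERPTR-type buffers (count and
 * size come from the driver's buf_setup), prepares them, puts them on the
 * stream list, hands them all to the driver via buf_queue and marks the
 * queue as reading.
 */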
/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned int count = 0, size = 0;
	int err, i;

	q->ops->buf_setup(q, &count, &size);
	if (count < 2)
		count = 2;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = PAGE_ALIGN(size);

	err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
	if (err < 0)
		return err;

	count = err;

	for (i = 0; i < count; i++) {
		field = videobuf_next_field(q);
		err = q->ops->buf_prepare(q, q->bufs[i], field);
		if (err)
			return err;
		list_add_tail(&q->bufs[i]->stream, &q->stream);
	}
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < count; i++)
		q->ops->buf_queue(q, q->bufs[i]);
	spin_unlock_irqrestore(q->irqlock, flags);
	q->reading = 1;
	return 0;
}

static void __videobuf_read_stop(struct videobuf_queue *q)
{
	int i;

	videobuf_queue_cancel(q);
	__videobuf_free(q);
	INIT_LIST_HEAD(&q->stream);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}
	q->read_buf = NULL;
}

int videobuf_read_start(struct videobuf_queue *q)
{
	int rc;

	videobuf_queue_lock(q);
	rc = __videobuf_read_start(q);
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_read_start);

void videobuf_read_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);
	__videobuf_read_stop(q);
	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);

void videobuf_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);

	if (q->streaming)
		__videobuf_streamoff(q);

	if (q->reading)
		__videobuf_read_stop(q);

	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);

ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags = 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	dprintk(2, "%s\n", __func__);
	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
		if (rc < 0) {
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == VIDEOBUF_DONE) {
			rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
					retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval += rc;
			count -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			spin_lock_irqsave(q->irqlock, flags);
			q->ops->buf_queue(q, q->read_buf);
			spin_unlock_irqrestore(q->irqlock, flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);

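/*
 * videobuf_poll_stream() - poll() helper for videobuf-based drivers.
 * In streaming mode it waits on the first buffer of the stream list; in
 * read() mode it starts reading on demand (when POLLIN/POLLRDNORM was
 * requested) and waits on q->read_buf.  Returns POLLERR if there is no
 * buffer to wait on, POLLOUT | POLLWRNORM for output queues and
 * POLLIN | POLLRDNORM otherwise once the buffer is done (or has errored).
 */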
unsigned int videobuf_poll_stream(struct file *file,
				  struct videobuf_queue *q,
				  poll_table *wait)
{
	unsigned long req_events = poll_requested_events(wait);
	struct videobuf_buffer *buf = NULL;
	unsigned int rc = 0;

	videobuf_queue_lock(q);
	if (q->streaming) {
		if (!list_empty(&q->stream))
			buf = list_entry(q->stream.next,
					 struct videobuf_buffer, stream);
	} else if (req_events & (POLLIN | POLLRDNORM)) {
		if (!q->reading)
			__videobuf_read_start(q);
		if (!q->reading) {
			rc = POLLERR;
		} else if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		buf = q->read_buf;
	}
	if (!buf)
		rc = POLLERR;

	if (0 == rc) {
		poll_wait(file, &buf->done, wait);
		if (buf->state == VIDEOBUF_DONE ||
		    buf->state == VIDEOBUF_ERROR) {
			switch (q->type) {
			case V4L2_BUF_TYPE_VIDEO_OUTPUT:
			case V4L2_BUF_TYPE_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
				rc = POLLOUT | POLLWRNORM;
				break;
			default:
				rc = POLLIN | POLLRDNORM;
				break;
			}
		}
	}
	videobuf_queue_unlock(q);
	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);

int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
	int rc = -EINVAL;
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		struct videobuf_buffer *buf = q->bufs[i];

		if (buf && buf->memory == V4L2_MEMORY_MMAP &&
		    buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
			rc = CALL(q, mmap_mapper, q, buf, vma);
			break;
		}
	}
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);