/*
   cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices

   Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
	Based on cx88 driver

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "cx231xx.h"
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/i2c.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/drv-intf/msp3400.h>
#include <media/tuner.h>

#include "cx231xx-vbi.h"

static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
	char *errmsg = "Unknown";

	switch (status) {
	case -ENOENT:
		errmsg = "unlinked synchronously";
		break;
	case -ECONNRESET:
		errmsg = "unlinked asynchronously";
		break;
	case -ENOSR:
		errmsg = "Buffer error (overrun)";
		break;
	case -EPIPE:
		errmsg = "Stalled (device not responding)";
		break;
	case -EOVERFLOW:
		errmsg = "Babble (bad cable?)";
		break;
	case -EPROTO:
		errmsg = "Bit-stuff error (bad cable?)";
		break;
	case -EILSEQ:
		errmsg = "CRC/Timeout (could be anything)";
		break;
	case -ETIME:
		errmsg = "Device does not respond";
		break;
	}
	if (packet < 0) {
		dev_err(dev->dev,
			"URB status %d [%s].\n", status, errmsg);
	} else {
		dev_err(dev->dev,
			"URB packet %d, status %d [%s].\n",
			packet, status, errmsg);
	}
}

/*
 * Controls the isoc copy of each urb packet
 */
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	int rc = 1;
	unsigned char *p_buffer;
	u32 bytes_parsed = 0, buffer_size = 0;
	u8 sav_eav = 0;

	if (!dev)
		return 0;

	if (dev->state & DEV_DISCONNECTED)
		return 0;

	if (urb->status < 0) {
		print_err_status(dev, -1, urb->status);
		if (urb->status == -ENOENT)
			return 0;
	}

	/* get buffer pointer and length */
	p_buffer = urb->transfer_buffer;
	buffer_size = urb->actual_length;

	if (buffer_size > 0) {
		bytes_parsed = 0;

		if (dma_q->is_partial_line) {
			/* Handle the case where we were working on a partial
			   line */
			sav_eav = dma_q->last_sav;
		} else {
			/* Check for a SAV/EAV overlapping the
			   buffer boundary */

			sav_eav = cx231xx_find_boundary_SAV_EAV(p_buffer,
							dma_q->partial_buf,
							&bytes_parsed);
		}

		sav_eav &= 0xF0;
		/* Get the first line if we have some portion of an SAV/EAV from
		   the last buffer or a partial line */
		if (sav_eav) {
			bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
				sav_eav,		     /* SAV/EAV */
				p_buffer + bytes_parsed,     /* p_buffer */
				buffer_size - bytes_parsed); /* buffer size */
		}
		/* Now parse data that is completely in this buffer */
		dma_q->is_partial_line = 0;

		while (bytes_parsed < buffer_size) {
			u32 bytes_used = 0;

			sav_eav = cx231xx_find_next_SAV_EAV(
				p_buffer + bytes_parsed,    /* p_buffer */
				buffer_size - bytes_parsed, /* buffer size */
				&bytes_used);	/* bytes used to get SAV/EAV */

			bytes_parsed += bytes_used;

			sav_eav &= 0xF0;
			if (sav_eav && (bytes_parsed < buffer_size)) {
				bytes_parsed += cx231xx_get_vbi_line(dev,
					dma_q, sav_eav,	     /* SAV/EAV */
					p_buffer + bytes_parsed,     /* p_buffer */
					buffer_size - bytes_parsed); /* buffer size */
			}
		}

		/* Save the last four bytes of the buffer so we can
		   check the buffer boundary condition next time */
		memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
		bytes_parsed = 0;
	}

	return rc;
}

/* ------------------------------------------------------------------
	Vbi buf operations
   ------------------------------------------------------------------*/

static int
vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count,
		 unsigned int *size)
{
	struct cx231xx_fh *fh = vq->priv_data;
	struct cx231xx *dev = fh->dev;
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);

	*size = (dev->width * height * 2 * 2);
	if (0 == *count)
		*count = CX231XX_DEF_VBI_BUF;

	if (*count < CX231XX_MIN_BUF)
		*count = CX231XX_MIN_BUF;

	return 0;
}

/* This is called *without* dev->slock held; please keep it that way */
static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
{
	struct cx231xx_fh *fh = vq->priv_data;
	struct cx231xx *dev = fh->dev;
	unsigned long flags = 0;

	BUG_ON(in_interrupt());

	/* We used to wait for the buffer to finish here, but this didn't work
	   because, as we were keeping the state as VIDEOBUF_QUEUED,
	   videobuf_queue_cancel marked it as finished for us.
	   (Also, it could wedge forever if the hardware was misconfigured.)

	   This should be safe; by the time we get here, the buffer isn't
	   queued anymore.  If we ever start marking the buffers as
	   VIDEOBUF_ACTIVE, it won't be, though.
	 */
	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	if (dev->vbi_mode.bulk_ctl.buf == buf)
		dev->vbi_mode.bulk_ctl.buf = NULL;
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);

	videobuf_vmalloc_free(&buf->vb);
	buf->vb.state = VIDEOBUF_NEEDS_INIT;
}

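/*
 * Prepare a VBI buffer: validate its size, lock its memory and, for the
 * first buffer, allocate and submit the transfer URBs.
 */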
static int
vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
		   enum v4l2_field field)
{
	struct cx231xx_fh *fh = vq->priv_data;
	struct cx231xx_buffer *buf =
	    container_of(vb, struct cx231xx_buffer, vb);
	struct cx231xx *dev = fh->dev;
	int rc = 0, urb_init = 0;
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	buf->vb.size = ((dev->width << 1) * height * 2);

	if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
		return -EINVAL;

	buf->vb.width = dev->width;
	buf->vb.height = height;
	buf->vb.field = field;
	buf->vb.field = V4L2_FIELD_SEQ_TB;

	if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
		rc = videobuf_iolock(vq, &buf->vb, NULL);
		if (rc < 0)
			goto fail;
	}

	if (!dev->vbi_mode.bulk_ctl.num_bufs)
		urb_init = 1;

	if (urb_init) {
		rc = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
					   CX231XX_NUM_VBI_BUFS,
					   dev->vbi_mode.alt_max_pkt_size[0],
					   cx231xx_isoc_vbi_copy);
		if (rc < 0)
			goto fail;
	}

	buf->vb.state = VIDEOBUF_PREPARED;
	return 0;

fail:
	free_buffer(vq, buf);
	return rc;
}

static void
vbi_buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
	struct cx231xx_buffer *buf =
	    container_of(vb, struct cx231xx_buffer, vb);
	struct cx231xx_fh *fh = vq->priv_data;
	struct cx231xx *dev = fh->dev;
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;

	buf->vb.state = VIDEOBUF_QUEUED;
	list_add_tail(&buf->vb.queue, &vidq->active);
}

static void vbi_buffer_release(struct videobuf_queue *vq,
			       struct videobuf_buffer *vb)
{
	struct cx231xx_buffer *buf =
	    container_of(vb, struct cx231xx_buffer, vb);

	free_buffer(vq, buf);
}

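/* videobuf queue operations used for VBI capture */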
struct videobuf_queue_ops cx231xx_vbi_qops = {
	.buf_setup   = vbi_buffer_setup,
	.buf_prepare = vbi_buffer_prepare,
	.buf_queue   = vbi_buffer_queue,
	.buf_release = vbi_buffer_release,
};

/* ------------------------------------------------------------------
	URB control
   ------------------------------------------------------------------*/

/*
 * IRQ callback, called by URB callback
 */
static void cx231xx_irq_vbi_callback(struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);

	switch (urb->status) {
	case 0:			/* success */
	case -ETIMEDOUT:	/* NAK */
		break;
	case -ECONNRESET:	/* kill */
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:		/* error */
		dev_err(dev->dev,
			"urb completion error %d.\n", urb->status);
		break;
	}

	/* Copy data from URB */
	spin_lock(&dev->vbi_mode.slock);
	dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
	spin_unlock(&dev->vbi_mode.slock);

	/* Reset status */
	urb->status = 0;

	urb->status = usb_submit_urb(urb, GFP_ATOMIC);
	if (urb->status) {
		dev_err(dev->dev, "urb resubmit failed (error=%i)\n",
			urb->status);
	}
}

/*
 * Stop and Deallocate URBs
 */
void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
{
	struct urb *urb;
	int i;

	dev_dbg(dev->dev, "called cx231xx_uninit_vbi_isoc\n");

	dev->vbi_mode.bulk_ctl.nfields = -1;
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = dev->vbi_mode.bulk_ctl.urb[i];
		if (urb) {
			if (!irqs_disabled())
				usb_kill_urb(urb);
			else
				usb_unlink_urb(urb);

			if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
				kfree(dev->vbi_mode.bulk_ctl.transfer_buffer[i]);
				dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
					NULL;
			}
			usb_free_urb(urb);
			dev->vbi_mode.bulk_ctl.urb[i] = NULL;
		}
		dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
	}

	kfree(dev->vbi_mode.bulk_ctl.urb);
	kfree(dev->vbi_mode.bulk_ctl.transfer_buffer);

	dev->vbi_mode.bulk_ctl.urb = NULL;
	dev->vbi_mode.bulk_ctl.transfer_buffer = NULL;
	dev->vbi_mode.bulk_ctl.num_bufs = 0;

	cx231xx_capture_start(dev, 0, Vbi);
}
EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);

/*
 * Allocate URBs and start IRQ
 */
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
			  int num_bufs, int max_pkt_size,
			  int (*bulk_copy) (struct cx231xx *dev,
					    struct urb *urb))
{
	struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
	int i;
	int sb_size, pipe;
	struct urb *urb;
	int rc;

	dev_dbg(dev->dev, "called cx231xx_init_vbi_isoc\n");

	/* De-allocate all pending stuff */
	cx231xx_uninit_vbi_isoc(dev);

	/* clear any halt condition on the endpoint */
	usb_clear_halt(dev->udev,
		       usb_rcvbulkpipe(dev->udev,
				       dev->vbi_mode.end_point_addr));

	dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
	dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;
	dma_q->pos = 0;
	dma_q->is_partial_line = 0;
	dma_q->last_sav = 0;
	dma_q->current_field = -1;
	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ?
				  PAL_VBI_LINES : NTSC_VBI_LINES);
	dma_q->lines_completed = 0;
	for (i = 0; i < 8; i++)
		dma_q->partial_buf[i] = 0;

	dev->vbi_mode.bulk_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
					     GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.urb) {
		dev_err(dev->dev,
			"cannot alloc memory for usb buffers\n");
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.transfer_buffer =
	    kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
		dev_err(dev->dev,
			"cannot allocate memory for usb transfer\n");
		kfree(dev->vbi_mode.bulk_ctl.urb);
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
	dev->vbi_mode.bulk_ctl.buf = NULL;

	sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;

	/* allocate urbs and transfer buffers */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}
		dev->vbi_mode.bulk_ctl.urb[i] = urb;
		urb->transfer_flags = 0;

		dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
		    kzalloc(sb_size, GFP_KERNEL);
		if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
			dev_err(dev->dev,
				"unable to allocate %i bytes for transfer buffer %i%s\n",
				sb_size, i,
				in_interrupt() ? " while in int" : "");
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}

		pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
		usb_fill_bulk_urb(urb, dev->udev, pipe,
				  dev->vbi_mode.bulk_ctl.transfer_buffer[i],
				  sb_size, cx231xx_irq_vbi_callback, dma_q);
	}

	init_waitqueue_head(&dma_q->wq);

	/* submit urbs and enable IRQs */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
		if (rc) {
			dev_err(dev->dev,
				"submit of urb %i failed (error=%i)\n", i, rc);
			cx231xx_uninit_vbi_isoc(dev);
			return rc;
		}
	}

	cx231xx_capture_start(dev, 1, Vbi);

	return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc);

/*
 * Map a SAV/EAV code to its VBI field and copy the line data that follows
 */
u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			 u8 sav_eav, u8 *p_buffer, u32 buffer_size)
{
	u32 bytes_copied = 0;
	int current_field = -1;

	switch (sav_eav) {
	case SAV_VBI_FIELD1:
		current_field = 1;
		break;
	case SAV_VBI_FIELD2:
		current_field = 2;
		break;
	default:
		break;
	}

	if (current_field < 0)
		return bytes_copied;

	dma_q->last_sav = sav_eav;

	bytes_copied =
	    cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size,
				  current_field);

	return bytes_copied;
}

/*
 * Announce that a buffer was filled and request the next one
 */
static inline void vbi_buffer_filled(struct cx231xx *dev,
				     struct cx231xx_dmaqueue *dma_q,
				     struct cx231xx_buffer *buf)
{
	/* Advise that the buffer was filled */
	/* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.i); */

	buf->vb.state = VIDEOBUF_DONE;
	buf->vb.field_count++;
	v4l2_get_timestamp(&buf->vb.ts);

	dev->vbi_mode.bulk_ctl.buf = NULL;

	list_del(&buf->vb.queue);
	wake_up(&buf->vb.done);
}

/*
 * Copy one VBI line (or the remainder of a partial line) into the current
 * videobuf buffer, tracking lines that span URB boundaries.  Returns the
 * number of bytes taken from p_line (0 once the field is already complete).
 */
u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			  u8 *p_line, u32 length, int field_number)
{
	u32 bytes_to_copy;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width * 2;

	if (dma_q->current_field == -1) {
		/* Just starting up */
		cx231xx_reset_vbi_buffer(dev, dma_q);
	}

	if (dma_q->current_field != field_number)
		dma_q->lines_completed = 0;

	/* get the buffer pointer */
	buf = dev->vbi_mode.bulk_ctl.buf;

	/* Remember the field number for next time */
	dma_q->current_field = field_number;

	bytes_to_copy = dma_q->bytes_left_in_line;
	if (bytes_to_copy > length)
		bytes_to_copy = length;

	if (dma_q->lines_completed >= dma_q->lines_per_field) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return 0;
	}

	dma_q->is_partial_line = 1;

	/* If we don't have a buffer, just return the number of bytes we would
	   have copied if we had a buffer. */
	if (!buf) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return bytes_to_copy;
	}

	/* copy the data to video buffer */
	cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);

	dma_q->pos += bytes_to_copy;
	dma_q->bytes_left_in_line -= bytes_to_copy;

	if (dma_q->bytes_left_in_line == 0) {
		dma_q->bytes_left_in_line = _line_size;
		dma_q->lines_completed++;
		dma_q->is_partial_line = 0;

		if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) {
			vbi_buffer_filled(dev, dma_q, buf);

			dma_q->pos = 0;
			dma_q->lines_completed = 0;
			cx231xx_reset_vbi_buffer(dev, dma_q);
		}
	}

	return bytes_to_copy;
}

/*
 * video-buf generic routine to get the next available buffer
 */
static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
				    struct cx231xx_buffer **buf)
{
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	char *outp;

	if (list_empty(&dma_q->active)) {
		dev_err(dev->dev, "No active queue to serve\n");
		dev->vbi_mode.bulk_ctl.buf = NULL;
		*buf = NULL;
		return;
	}

	/* Get the next buffer */
	*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);

	/* Clean up the buffer - useful for testing for frame/URB loss */
	outp = videobuf_to_vmalloc(&(*buf)->vb);
	memset(outp, 0, (*buf)->vb.size);

	dev->vbi_mode.bulk_ctl.buf = *buf;

	return;
}

/*
 * Grab the next queued buffer (if needed) and reset the per-line state
 */
void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	struct cx231xx_buffer *buf;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL) {
		/* first try to get the buffer */
		get_next_vbi_buf(dma_q, &buf);

		dma_q->pos = 0;
		dma_q->current_field = -1;
	}

	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_completed = 0;
}

/*
 * Copy a chunk of line data to its offset within the current buffer
 */
int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			u8 *p_buffer, u32 bytes_to_copy)
{
	u8 *p_out_buffer = NULL;
	u32 current_line_bytes_copied = 0;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width << 1;
	void *startwrite;
	int offset, lencopy;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL)
		return -EINVAL;

	p_out_buffer = videobuf_to_vmalloc(&buf->vb);

	if (dma_q->bytes_left_in_line != _line_size) {
		current_line_bytes_copied =
		    _line_size - dma_q->bytes_left_in_line;
	}

	offset = (dma_q->lines_completed * _line_size) +
		 current_line_bytes_copied;

	if (dma_q->current_field == 2) {
		/* Populate the second half of the frame */
		offset += (dev->width * 2 * dma_q->lines_per_field);
	}

	/* prepare destination address */
	startwrite = p_out_buffer + offset;

	lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
		  bytes_to_copy : dma_q->bytes_left_in_line;

	memcpy(startwrite, p_buffer, lencopy);

	return 0;
}

/*
 * A VBI buffer is done once the second field has all of its lines
 */
u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	if (dma_q->lines_completed == height && dma_q->current_field == 2)
		return 1;
	else
		return 0;
}