// SPDX-License-Identifier: GPL-2.0-or-later
/*
   cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices

   Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
	Based on cx88 driver

 */

#include "cx231xx.h"
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/i2c.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/drv-intf/msp3400.h>
#include <media/tuner.h>

#include "cx231xx-vbi.h"

static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
	char *errmsg = "Unknown";

	switch (status) {
	case -ENOENT:
		errmsg = "unlinked synchronously";
		break;
	case -ECONNRESET:
		errmsg = "unlinked asynchronously";
		break;
	case -ENOSR:
		errmsg = "Buffer error (overrun)";
		break;
	case -EPIPE:
		errmsg = "Stalled (device not responding)";
		break;
	case -EOVERFLOW:
		errmsg = "Babble (bad cable?)";
		break;
	case -EPROTO:
		errmsg = "Bit-stuff error (bad cable?)";
		break;
	case -EILSEQ:
		errmsg = "CRC/Timeout (could be anything)";
		break;
	case -ETIME:
		errmsg = "Device does not respond";
		break;
	}
	if (packet < 0) {
		dev_err(dev->dev,
			"URB status %d [%s].\n", status, errmsg);
	} else {
		dev_err(dev->dev,
			"URB packet %d, status %d [%s].\n",
			packet, status, errmsg);
	}
}

/*
 * Controls the isoc copy of each urb packet
 */
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	int rc = 1;
	unsigned char *p_buffer;
	u32 bytes_parsed = 0, buffer_size = 0;
	u8 sav_eav = 0;

	if (!dev)
		return 0;

	if (dev->state & DEV_DISCONNECTED)
		return 0;

	if (urb->status < 0) {
		print_err_status(dev, -1, urb->status);
		if (urb->status == -ENOENT)
			return 0;
	}

	/* get buffer pointer and length */
	p_buffer = urb->transfer_buffer;
	buffer_size = urb->actual_length;

	if (buffer_size > 0) {
		bytes_parsed = 0;

		if (dma_q->is_partial_line) {
			/* Handle the case where we were working on a partial
			   line */
			sav_eav = dma_q->last_sav;
		} else {
			/* Check for a SAV/EAV overlapping the
			   buffer boundary */
			sav_eav = cx231xx_find_boundary_SAV_EAV(p_buffer,
							dma_q->partial_buf,
							&bytes_parsed);
		}

		sav_eav &= 0xF0;
		/* Get the first line if we have some portion of an SAV/EAV from
		   the last buffer or a partial line */
		if (sav_eav) {
			bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
				sav_eav,			/* SAV/EAV */
				p_buffer + bytes_parsed,	/* p_buffer */
				buffer_size - bytes_parsed);	/* buffer size */
		}

		/* Now parse data that is completely in this buffer */
		dma_q->is_partial_line = 0;

		while (bytes_parsed < buffer_size) {
			u32 bytes_used = 0;

			sav_eav = cx231xx_find_next_SAV_EAV(
				p_buffer + bytes_parsed,	/* p_buffer */
				buffer_size - bytes_parsed,	/* buffer size */
				&bytes_used);	/* bytes used to get SAV/EAV */

			bytes_parsed += bytes_used;

			sav_eav &= 0xF0;
			if (sav_eav && (bytes_parsed < buffer_size)) {
				bytes_parsed += cx231xx_get_vbi_line(dev,
					dma_q, sav_eav,		 /* SAV/EAV */
					p_buffer + bytes_parsed, /* p_buffer */
					buffer_size - bytes_parsed);/* buf size */
			}
		}

		/* Save the last four bytes of the buffer so we can
		   check the buffer boundary condition next time */
		memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
		bytes_parsed = 0;
	}

	return rc;
}

/* ------------------------------------------------------------------
	Vbi buf operations
   ------------------------------------------------------------------*/

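/*
 * Note on buffer sizing: every VBI line is stored as dev->width samples of
 * two bytes, and one vb2 buffer holds the VBI lines of both fields, so
 * vbi_queue_setup() and vbi_buf_prepare() both size the single plane as
 *
 *	dev->width * 2 bytes/sample * VBI lines per field * 2 fields
 *
 * The same per-line stride (dev->width << 1) is used by the copy helpers
 * at the bottom of this file.
 */
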
static int vbi_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);

	*nplanes = 1;
	sizes[0] = (dev->width * height * 2 * 2);
	return 0;
}

/* This is called *without* dev->slock held; please keep it that way */
static int vbi_buf_prepare(struct vb2_buffer *vb)
{
	struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	u32 height = 0;
	u32 size;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	size = ((dev->width << 1) * height * 2);

	if (vb2_plane_size(vb, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, size);
	return 0;
}

static void vbi_buf_queue(struct vb2_buffer *vb)
{
	struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct cx231xx_buffer *buf =
	    container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	list_add_tail(&buf->list, &vidq->active);
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}

static void return_all_buffers(struct cx231xx *dev,
			       enum vb2_buffer_state state)
{
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	struct cx231xx_buffer *buf, *node;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	dev->vbi_mode.bulk_ctl.buf = NULL;
	list_for_each_entry_safe(buf, node, &vidq->active, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}

static int vbi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	int ret;

	vidq->sequence = 0;
	ret = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
				    CX231XX_NUM_VBI_BUFS,
				    dev->vbi_mode.alt_max_pkt_size[0],
				    cx231xx_isoc_vbi_copy);
	if (ret)
		return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
	return ret;
}

static void vbi_stop_streaming(struct vb2_queue *vq)
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);

	return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}

struct vb2_ops cx231xx_vbi_qops = {
	.queue_setup = vbi_queue_setup,
	.buf_prepare = vbi_buf_prepare,
	.buf_queue = vbi_buf_queue,
	.start_streaming = vbi_start_streaming,
	.stop_streaming = vbi_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

/* ------------------------------------------------------------------
	URB control
   ------------------------------------------------------------------*/

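/*
 * Note on the URB life cycle: cx231xx_init_vbi_isoc() allocates the bulk
 * URBs and submits them, each completion is handled in
 * cx231xx_irq_vbi_callback() which copies the payload under vbi_mode.slock
 * through the bulk_copy hook and then resubmits the URB with GFP_ATOMIC,
 * and cx231xx_uninit_vbi_isoc() kills and frees everything when streaming
 * stops.
 */
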
/*
 * IRQ callback, called by URB callback
 */
static void cx231xx_irq_vbi_callback(struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	unsigned long flags;

	switch (urb->status) {
	case 0:			/* success */
	case -ETIMEDOUT:	/* NAK */
		break;
	case -ECONNRESET:	/* kill */
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:		/* error */
		dev_err(dev->dev,
			"urb completion error %d.\n", urb->status);
		break;
	}

	/* Copy data from URB */
	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);

	/* Reset status */
	urb->status = 0;

	urb->status = usb_submit_urb(urb, GFP_ATOMIC);
	if (urb->status) {
		dev_err(dev->dev, "urb resubmit failed (error=%i)\n",
			urb->status);
	}
}

/*
 * Stop and Deallocate URBs
 */
void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
{
	struct urb *urb;
	int i;

	dev_dbg(dev->dev, "called cx231xx_uninit_vbi_isoc\n");

	dev->vbi_mode.bulk_ctl.nfields = -1;
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = dev->vbi_mode.bulk_ctl.urb[i];
		if (urb) {
			if (!irqs_disabled())
				usb_kill_urb(urb);
			else
				usb_unlink_urb(urb);

			if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
				kfree(dev->vbi_mode.bulk_ctl.transfer_buffer[i]);
				dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
				    NULL;
			}
			usb_free_urb(urb);
			dev->vbi_mode.bulk_ctl.urb[i] = NULL;
		}
		dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
	}

	kfree(dev->vbi_mode.bulk_ctl.urb);
	kfree(dev->vbi_mode.bulk_ctl.transfer_buffer);

	dev->vbi_mode.bulk_ctl.urb = NULL;
	dev->vbi_mode.bulk_ctl.transfer_buffer = NULL;
	dev->vbi_mode.bulk_ctl.num_bufs = 0;

	cx231xx_capture_start(dev, 0, Vbi);
}
EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);

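/*
 * Note: cx231xx_init_vbi_isoc() is called from vbi_start_streaming() with
 * CX231XX_NUM_VBI_PACKETS packets per URB, CX231XX_NUM_VBI_BUFS URBs and
 * the max packet size of the current alternate setting, so each transfer
 * buffer below is sized as
 *
 *	sb_size = max_packets * max_pkt_size
 */
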
/*
 * Allocate URBs and start IRQ
 */
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
			  int num_bufs, int max_pkt_size,
			  int (*bulk_copy) (struct cx231xx *dev,
					    struct urb *urb))
{
	struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
	int i;
	int sb_size, pipe;
	struct urb *urb;
	int rc;

	dev_dbg(dev->dev, "called cx231xx_init_vbi_isoc\n");

	/* De-allocate all pending stuff */
	cx231xx_uninit_vbi_isoc(dev);

	/* clear if any halt */
	usb_clear_halt(dev->udev,
		       usb_rcvbulkpipe(dev->udev,
				       dev->vbi_mode.end_point_addr));

	dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
	dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;
	dma_q->pos = 0;
	dma_q->is_partial_line = 0;
	dma_q->last_sav = 0;
	dma_q->current_field = -1;
	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ?
				  PAL_VBI_LINES : NTSC_VBI_LINES);
	dma_q->lines_completed = 0;
	for (i = 0; i < 8; i++)
		dma_q->partial_buf[i] = 0;

	dev->vbi_mode.bulk_ctl.urb = kcalloc(num_bufs, sizeof(void *),
					     GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.urb) {
		dev_err(dev->dev,
			"cannot alloc memory for usb buffers\n");
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.transfer_buffer =
	    kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
		dev_err(dev->dev,
			"cannot allocate memory for usb transfer buffers\n");
		kfree(dev->vbi_mode.bulk_ctl.urb);
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
	dev->vbi_mode.bulk_ctl.buf = NULL;

	sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;

	/* allocate urbs and transfer buffers */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}
		dev->vbi_mode.bulk_ctl.urb[i] = urb;
		urb->transfer_flags = 0;

		dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
		    kzalloc(sb_size, GFP_KERNEL);
		if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
			dev_err(dev->dev,
				"unable to allocate %i bytes for transfer buffer %i%s\n",
				sb_size, i,
				in_interrupt() ? " while in int" : "");
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}

		pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
		usb_fill_bulk_urb(urb, dev->udev, pipe,
				  dev->vbi_mode.bulk_ctl.transfer_buffer[i],
				  sb_size, cx231xx_irq_vbi_callback, dma_q);
	}

	init_waitqueue_head(&dma_q->wq);

	/* submit URBs and enable IRQ */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
		if (rc) {
			dev_err(dev->dev,
				"submit of urb %i failed (error=%i)\n", i, rc);
			cx231xx_uninit_vbi_isoc(dev);
			return rc;
		}
	}

	cx231xx_capture_start(dev, 1, Vbi);

	return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc);

u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			 u8 sav_eav, u8 *p_buffer, u32 buffer_size)
{
	u32 bytes_copied = 0;
	int current_field = -1;

	switch (sav_eav) {

	case SAV_VBI_FIELD1:
		current_field = 1;
		break;

	case SAV_VBI_FIELD2:
		current_field = 2;
		break;
	default:
		break;
	}

	if (current_field < 0)
		return bytes_copied;

	dma_q->last_sav = sav_eav;

	bytes_copied =
	    cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size,
				  current_field);

	return bytes_copied;
}

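/*
 * Note on line assembly: cx231xx_get_vbi_line() accepts only the
 * SAV_VBI_FIELD1/SAV_VBI_FIELD2 codes and forwards the payload to
 * cx231xx_copy_vbi_line(), which fills the current vb2 buffer line by
 * line; once both fields are complete (see cx231xx_is_vbi_buffer_done()),
 * the buffer is handed back to vb2 through vbi_buffer_filled() below.
 */
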
/*
 * Announce that a buffer was filled and request the next one
 */
static inline void vbi_buffer_filled(struct cx231xx *dev,
				     struct cx231xx_dmaqueue *dma_q,
				     struct cx231xx_buffer *buf)
{
	/* Advise that the buffer was filled */
	/* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.index); */

	buf->vb.sequence = dma_q->sequence++;
	buf->vb.vb2_buf.timestamp = ktime_get_ns();

	dev->vbi_mode.bulk_ctl.buf = NULL;

	list_del(&buf->list);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}

u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			  u8 *p_line, u32 length, int field_number)
{
	u32 bytes_to_copy;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width * 2;

	if (dma_q->current_field == -1) {
		/* Just starting up */
		cx231xx_reset_vbi_buffer(dev, dma_q);
	}

	if (dma_q->current_field != field_number)
		dma_q->lines_completed = 0;

	/* get the buffer pointer */
	buf = dev->vbi_mode.bulk_ctl.buf;

	/* Remember the field number for next time */
	dma_q->current_field = field_number;

	bytes_to_copy = dma_q->bytes_left_in_line;
	if (bytes_to_copy > length)
		bytes_to_copy = length;

	if (dma_q->lines_completed >= dma_q->lines_per_field) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return 0;
	}

	dma_q->is_partial_line = 1;

	/* If we don't have a buffer, just return the number of bytes we would
	   have copied if we had a buffer. */
	if (!buf) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return bytes_to_copy;
	}

	/* copy the data to video buffer */
	cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);

	dma_q->pos += bytes_to_copy;
	dma_q->bytes_left_in_line -= bytes_to_copy;

	if (dma_q->bytes_left_in_line == 0) {

		dma_q->bytes_left_in_line = _line_size;
		dma_q->lines_completed++;
		dma_q->is_partial_line = 0;

		if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) {

			vbi_buffer_filled(dev, dma_q, buf);

			dma_q->pos = 0;
			dma_q->lines_completed = 0;
			cx231xx_reset_vbi_buffer(dev, dma_q);
		}
	}

	return bytes_to_copy;
}

/*
 * video-buf generic routine to get the next available buffer
 */
static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
				    struct cx231xx_buffer **buf)
{
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	char *outp;

	if (list_empty(&dma_q->active)) {
		dev_err(dev->dev, "No active queue to serve\n");
		dev->vbi_mode.bulk_ctl.buf = NULL;
		*buf = NULL;
		return;
	}

	/* Get the next buffer */
	*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);

	/* Clean up the buffer - useful for testing for frame/URB loss */
	outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
	memset(outp, 0, vb2_plane_size(&(*buf)->vb.vb2_buf, 0));

	dev->vbi_mode.bulk_ctl.buf = *buf;

	return;
}

void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	struct cx231xx_buffer *buf;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL) {
		/* first try to get the buffer */
		get_next_vbi_buf(dma_q, &buf);

		dma_q->pos = 0;
		dma_q->current_field = -1;
	}

	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_completed = 0;
}

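/*
 * Note on the destination offset: cx231xx_do_vbi_copy() writes into the
 * vb2 plane at
 *
 *	offset = lines_completed * (dev->width * 2)
 *		 + bytes already copied within the current line
 *
 * and, for field 2, additionally skips the field 1 area of
 * lines_per_field * (dev->width * 2) bytes, so both fields end up stacked
 * in the same plane.
 */
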
int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			u8 *p_buffer, u32 bytes_to_copy)
{
	u8 *p_out_buffer = NULL;
	u32 current_line_bytes_copied = 0;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width << 1;
	void *startwrite;
	int offset, lencopy;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL)
		return -EINVAL;

	p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);

	if (dma_q->bytes_left_in_line != _line_size) {
		current_line_bytes_copied =
		    _line_size - dma_q->bytes_left_in_line;
	}

	offset = (dma_q->lines_completed * _line_size) +
		 current_line_bytes_copied;

	if (dma_q->current_field == 2) {
		/* Populate the second half of the frame */
		offset += (dev->width * 2 * dma_q->lines_per_field);
	}

	/* prepare destination address */
	startwrite = p_out_buffer + offset;

	lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
		  bytes_to_copy : dma_q->bytes_left_in_line;

	memcpy(startwrite, p_buffer, lencopy);

	return 0;
}

u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	if (dma_q->lines_completed == height && dma_q->current_field == 2)
		return 1;
	else
		return 0;
}