Lines Matching refs:dev (drivers/media/usb/cx231xx/cx231xx-vbi.c)

28 static inline void print_err_status(struct cx231xx *dev, int packet, int status)  in print_err_status()  argument
59 dev_err(dev->dev, in print_err_status()
62 dev_err(dev->dev, in print_err_status()
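Line 28 is the function head; lines 59 and 62 are its two dev_err() call sites. A minimal sketch of the usual shape, with the exact set of handled status codes assumed from similar V4L2-USB drivers:

static inline void print_err_status(struct cx231xx *dev,
                                    int packet, int status)
{
        char *errmsg = "Unknown";

        switch (status) {
        case -ENOENT:
                errmsg = "unlinked synchronously";
                break;
        case -ECONNRESET:
                errmsg = "unlinked asynchronously";
                break;
        case -ESHUTDOWN:
                errmsg = "device disconnected";
                break;
        }
        if (packet < 0)
                /* whole-URB failure (the line 59 hit) */
                dev_err(dev->dev, "URB status %d [%s].\n", status, errmsg);
        else
                /* per-packet failure (the line 62 hit) */
                dev_err(dev->dev, "URB packet %d, status %d [%s].\n",
                        packet, status, errmsg);
}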
71 static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb) in cx231xx_isoc_vbi_copy() argument
79 if (!dev) in cx231xx_isoc_vbi_copy()
82 if (dev->state & DEV_DISCONNECTED) in cx231xx_isoc_vbi_copy()
86 print_err_status(dev, -1, urb->status); in cx231xx_isoc_vbi_copy()
115 bytes_parsed += cx231xx_get_vbi_line(dev, dma_q, in cx231xx_isoc_vbi_copy()
136 bytes_parsed += cx231xx_get_vbi_line(dev, in cx231xx_isoc_vbi_copy()
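Lines 71-136 span the bulk-copy handler: it validates the device state, reports URB errors, then walks the transfer buffer handing each VBI line to cx231xx_get_vbi_line() (the hits at 115 and 136). A sketch of that control flow; the carry-over handling for a SAV/EAV code straddling two URBs is elided here, and the cx231xx_find_next_SAV_EAV() helper is the driver's own marker scanner:

static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
        struct cx231xx_dmaqueue *dma_q = urb->context;
        u8 *p_buffer, sav_eav;
        u32 bytes_parsed = 0, buffer_size;

        if (!dev)                                       /* line 79 */
                return 0;
        if (dev->state & DEV_DISCONNECTED)              /* line 82 */
                return 0;
        if (urb->status < 0)
                print_err_status(dev, -1, urb->status); /* line 86 */

        p_buffer = urb->transfer_buffer;
        buffer_size = urb->actual_length;

        /* Locate each SAV/EAV marker and hand the bytes that follow
         * it to cx231xx_get_vbi_line() */
        while (bytes_parsed < buffer_size) {
                u32 bytes_used = 0;

                sav_eav = cx231xx_find_next_SAV_EAV(p_buffer + bytes_parsed,
                                                    buffer_size - bytes_parsed,
                                                    &bytes_used);
                bytes_parsed += bytes_used;

                sav_eav &= 0xF0;
                if (sav_eav && bytes_parsed < buffer_size)
                        bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
                                        sav_eav, p_buffer + bytes_parsed,
                                        buffer_size - bytes_parsed);
        }
        return 0;
}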
160 struct cx231xx *dev = vb2_get_drv_priv(vq); in vbi_queue_setup() local
163 height = ((dev->norm & V4L2_STD_625_50) ? in vbi_queue_setup()
167 sizes[0] = (dev->width * height * 2 * 2); in vbi_queue_setup()
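Lines 160-167: the vb2 queue_setup op sizes the single plane as two fields of 2-byte samples per pixel, with the VBI line count keyed off the TV norm. A sketch assuming the recent vb2 queue_setup signature and the driver's PAL_VBI_LINES/NTSC_VBI_LINES constants:

static int vbi_queue_setup(struct vb2_queue *vq,
                           unsigned int *nbuffers, unsigned int *nplanes,
                           unsigned int sizes[], struct device *alloc_devs[])
{
        struct cx231xx *dev = vb2_get_drv_priv(vq);     /* line 160 */
        u32 height;

        height = (dev->norm & V4L2_STD_625_50) ?
                 PAL_VBI_LINES : NTSC_VBI_LINES;        /* line 163 */

        *nplanes = 1;
        /* 2 bytes per sample, 2 fields per frame (line 167) */
        sizes[0] = dev->width * height * 2 * 2;
        return 0;
}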
174 struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue); in vbi_buf_prepare() local
178 height = ((dev->norm & V4L2_STD_625_50) ? in vbi_buf_prepare()
180 size = ((dev->width << 1) * height * 2); in vbi_buf_prepare()
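Lines 174-180: buf_prepare recomputes the same norm-dependent size and rejects undersized planes. A sketch:

static int vbi_buf_prepare(struct vb2_buffer *vb)
{
        struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
        u32 height, size;

        height = (dev->norm & V4L2_STD_625_50) ?
                 PAL_VBI_LINES : NTSC_VBI_LINES;        /* line 178 */
        size = (dev->width << 1) * height * 2;          /* line 180 */

        if (vb2_plane_size(vb, 0) < size)
                return -EINVAL;
        vb2_set_plane_payload(vb, 0, size);
        return 0;
}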
190 struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue); in vbi_buf_queue() local
193 struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq; in vbi_buf_queue()
196 spin_lock_irqsave(&dev->vbi_mode.slock, flags); in vbi_buf_queue()
198 spin_unlock_irqrestore(&dev->vbi_mode.slock, flags); in vbi_buf_queue()
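Lines 190-198: buf_queue links the buffer onto the driver's active list under the VBI spinlock. A sketch, assuming cx231xx_buffer embeds a vb2_v4l2_buffer plus a list head:

static void vbi_buf_queue(struct vb2_buffer *vb)
{
        struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
        struct cx231xx_buffer *buf =
                container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
        struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;    /* line 193 */
        unsigned long flags;

        /* Append under the VBI spinlock (lines 196/198) */
        spin_lock_irqsave(&dev->vbi_mode.slock, flags);
        list_add_tail(&buf->list, &vidq->active);
        spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}

The same slock serializes this against the URB completion path, which consumes buffers from vidq->active.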
201 static void return_all_buffers(struct cx231xx *dev, in return_all_buffers() argument
204 struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq; in return_all_buffers()
208 spin_lock_irqsave(&dev->vbi_mode.slock, flags); in return_all_buffers()
209 dev->vbi_mode.bulk_ctl.buf = NULL; in return_all_buffers()
214 spin_unlock_irqrestore(&dev->vbi_mode.slock, flags); in return_all_buffers()
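Lines 201-214: return_all_buffers drops the in-flight working-buffer pointer (line 209) and hands every queued buffer back to vb2 in the requested state. A sketch:

static void return_all_buffers(struct cx231xx *dev,
                               enum vb2_buffer_state state)
{
        struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;    /* line 204 */
        struct cx231xx_buffer *buf, *node;
        unsigned long flags;

        spin_lock_irqsave(&dev->vbi_mode.slock, flags);
        dev->vbi_mode.bulk_ctl.buf = NULL;                      /* line 209 */
        list_for_each_entry_safe(buf, node, &vidq->active, list) {
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb.vb2_buf, state);
        }
        spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}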
219 struct cx231xx *dev = vb2_get_drv_priv(vq); in vbi_start_streaming() local
220 struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq; in vbi_start_streaming()
224 ret = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS, in vbi_start_streaming()
226 dev->vbi_mode.alt_max_pkt_size[0], in vbi_start_streaming()
229 return_all_buffers(dev, VB2_BUF_STATE_QUEUED); in vbi_start_streaming()
235 struct cx231xx *dev = vb2_get_drv_priv(vq); in vbi_stop_streaming() local
237 return_all_buffers(dev, VB2_BUF_STATE_ERROR); in vbi_stop_streaming()
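Lines 219-237: start_streaming arms the USB transfer machinery and, on failure, requeues the buffers; stop_streaming returns them all as errored. A sketch (the vidq->sequence reset is assumed):

static int vbi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct cx231xx *dev = vb2_get_drv_priv(vq);             /* line 219 */
        struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;    /* line 220 */
        int ret;

        vidq->sequence = 0;
        ret = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
                                    CX231XX_NUM_VBI_BUFS,
                                    dev->vbi_mode.alt_max_pkt_size[0],
                                    cx231xx_isoc_vbi_copy);
        if (ret)
                return_all_buffers(dev, VB2_BUF_STATE_QUEUED);  /* line 229 */
        return ret;
}

static void vbi_stop_streaming(struct vb2_queue *vq)
{
        struct cx231xx *dev = vb2_get_drv_priv(vq);             /* line 235 */

        return_all_buffers(dev, VB2_BUF_STATE_ERROR);           /* line 237 */
}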
262 struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode); in cx231xx_irq_vbi_callback() local
274 dev_err(dev->dev, in cx231xx_irq_vbi_callback()
280 spin_lock_irqsave(&dev->vbi_mode.slock, flags); in cx231xx_irq_vbi_callback()
281 dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb); in cx231xx_irq_vbi_callback()
282 spin_unlock_irqrestore(&dev->vbi_mode.slock, flags); in cx231xx_irq_vbi_callback()
289 dev_err(dev->dev, "urb resubmit failed (error=%i)\n", in cx231xx_irq_vbi_callback()
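Lines 262-289: the URB completion handler. It recovers dev via two container_of() steps (line 262), dispatches the payload through the registered bulk_copy hook under the lock (lines 280-282), and resubmits the URB. A sketch; the exact set of status cases that suppress resubmission is assumed from common USB-driver practice:

static void cx231xx_irq_vbi_callback(struct urb *urb)
{
        struct cx231xx_dmaqueue *dma_q = urb->context;
        struct cx231xx_video_mode *vmode =
                container_of(dma_q, struct cx231xx_video_mode, vidq);
        struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
        unsigned long flags;

        switch (urb->status) {
        case 0:                 /* success */
        case -ETIMEDOUT:        /* NAK */
                break;
        case -ECONNRESET:       /* kill */
        case -ENOENT:
        case -ESHUTDOWN:
                return;         /* URB is being torn down; don't resubmit */
        default:                /* other error */
                dev_err(dev->dev,
                        "urb completion error %d.\n", urb->status);
                break;
        }

        /* Copy data from the URB under the VBI lock (lines 280-282) */
        spin_lock_irqsave(&dev->vbi_mode.slock, flags);
        dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
        spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);

        /* Reset status and requeue */
        urb->status = 0;
        urb->status = usb_submit_urb(urb, GFP_ATOMIC);
        if (urb->status)
                dev_err(dev->dev, "urb resubmit failed (error=%i)\n",
                        urb->status);                           /* line 289 */
}

GFP_ATOMIC is required here because the completion handler runs in interrupt context.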
297 void cx231xx_uninit_vbi_isoc(struct cx231xx *dev) in cx231xx_uninit_vbi_isoc() argument
302 dev_dbg(dev->dev, "called cx231xx_uninit_vbi_isoc\n"); in cx231xx_uninit_vbi_isoc()
304 dev->vbi_mode.bulk_ctl.nfields = -1; in cx231xx_uninit_vbi_isoc()
305 for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) { in cx231xx_uninit_vbi_isoc()
306 urb = dev->vbi_mode.bulk_ctl.urb[i]; in cx231xx_uninit_vbi_isoc()
313 if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) { in cx231xx_uninit_vbi_isoc()
315 kfree(dev->vbi_mode.bulk_ctl. in cx231xx_uninit_vbi_isoc()
317 dev->vbi_mode.bulk_ctl.transfer_buffer[i] = in cx231xx_uninit_vbi_isoc()
321 dev->vbi_mode.bulk_ctl.urb[i] = NULL; in cx231xx_uninit_vbi_isoc()
323 dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL; in cx231xx_uninit_vbi_isoc()
326 kfree(dev->vbi_mode.bulk_ctl.urb); in cx231xx_uninit_vbi_isoc()
327 kfree(dev->vbi_mode.bulk_ctl.transfer_buffer); in cx231xx_uninit_vbi_isoc()
329 dev->vbi_mode.bulk_ctl.urb = NULL; in cx231xx_uninit_vbi_isoc()
330 dev->vbi_mode.bulk_ctl.transfer_buffer = NULL; in cx231xx_uninit_vbi_isoc()
331 dev->vbi_mode.bulk_ctl.num_bufs = 0; in cx231xx_uninit_vbi_isoc()
333 cx231xx_capture_start(dev, 0, Vbi); in cx231xx_uninit_vbi_isoc()
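Lines 297-333: teardown kills or unlinks each URB (depending on IRQ context), frees the per-URB transfer buffers, then the arrays themselves, and finally stops VBI capture on the bridge. A sketch:

void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
{
        struct urb *urb;
        int i;

        dev_dbg(dev->dev, "called cx231xx_uninit_vbi_isoc\n");  /* line 302 */

        dev->vbi_mode.bulk_ctl.nfields = -1;                    /* line 304 */
        for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
                urb = dev->vbi_mode.bulk_ctl.urb[i];
                if (urb) {
                        if (!irqs_disabled())
                                usb_kill_urb(urb);
                        else
                                usb_unlink_urb(urb);

                        kfree(dev->vbi_mode.bulk_ctl.transfer_buffer[i]);
                        dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;

                        usb_free_urb(urb);
                        dev->vbi_mode.bulk_ctl.urb[i] = NULL;   /* line 321 */
                }
                dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
        }

        kfree(dev->vbi_mode.bulk_ctl.urb);
        kfree(dev->vbi_mode.bulk_ctl.transfer_buffer);

        dev->vbi_mode.bulk_ctl.urb = NULL;
        dev->vbi_mode.bulk_ctl.transfer_buffer = NULL;
        dev->vbi_mode.bulk_ctl.num_bufs = 0;

        /* Tell the bridge to stop VBI capture (line 333) */
        cx231xx_capture_start(dev, 0, Vbi);
}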
340 int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets, in cx231xx_init_vbi_isoc() argument
342 int (*bulk_copy) (struct cx231xx *dev, in cx231xx_init_vbi_isoc()
345 struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq; in cx231xx_init_vbi_isoc()
351 dev_dbg(dev->dev, "called cx231xx_vbi_isoc\n"); in cx231xx_init_vbi_isoc()
354 cx231xx_uninit_vbi_isoc(dev); in cx231xx_init_vbi_isoc()
357 usb_clear_halt(dev->udev, in cx231xx_init_vbi_isoc()
358 usb_rcvbulkpipe(dev->udev, in cx231xx_init_vbi_isoc()
359 dev->vbi_mode.end_point_addr)); in cx231xx_init_vbi_isoc()
361 dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy; in cx231xx_init_vbi_isoc()
362 dev->vbi_mode.bulk_ctl.num_bufs = num_bufs; in cx231xx_init_vbi_isoc()
367 dma_q->bytes_left_in_line = dev->width << 1; in cx231xx_init_vbi_isoc()
368 dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ? in cx231xx_init_vbi_isoc()
374 dev->vbi_mode.bulk_ctl.urb = kcalloc(num_bufs, sizeof(void *), in cx231xx_init_vbi_isoc()
376 if (!dev->vbi_mode.bulk_ctl.urb) { in cx231xx_init_vbi_isoc()
377 dev_err(dev->dev, in cx231xx_init_vbi_isoc()
382 dev->vbi_mode.bulk_ctl.transfer_buffer = in cx231xx_init_vbi_isoc()
384 if (!dev->vbi_mode.bulk_ctl.transfer_buffer) { in cx231xx_init_vbi_isoc()
385 dev_err(dev->dev, in cx231xx_init_vbi_isoc()
387 kfree(dev->vbi_mode.bulk_ctl.urb); in cx231xx_init_vbi_isoc()
391 dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size; in cx231xx_init_vbi_isoc()
392 dev->vbi_mode.bulk_ctl.buf = NULL; in cx231xx_init_vbi_isoc()
394 sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size; in cx231xx_init_vbi_isoc()
397 for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) { in cx231xx_init_vbi_isoc()
401 cx231xx_uninit_vbi_isoc(dev); in cx231xx_init_vbi_isoc()
404 dev->vbi_mode.bulk_ctl.urb[i] = urb; in cx231xx_init_vbi_isoc()
407 dev->vbi_mode.bulk_ctl.transfer_buffer[i] = in cx231xx_init_vbi_isoc()
409 if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) { in cx231xx_init_vbi_isoc()
410 dev_err(dev->dev, in cx231xx_init_vbi_isoc()
413 cx231xx_uninit_vbi_isoc(dev); in cx231xx_init_vbi_isoc()
417 pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr); in cx231xx_init_vbi_isoc()
418 usb_fill_bulk_urb(urb, dev->udev, pipe, in cx231xx_init_vbi_isoc()
419 dev->vbi_mode.bulk_ctl.transfer_buffer[i], in cx231xx_init_vbi_isoc()
426 for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) { in cx231xx_init_vbi_isoc()
427 rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC); in cx231xx_init_vbi_isoc()
429 dev_err(dev->dev, in cx231xx_init_vbi_isoc()
431 cx231xx_uninit_vbi_isoc(dev); in cx231xx_init_vbi_isoc()
436 cx231xx_capture_start(dev, 1, Vbi); in cx231xx_init_vbi_isoc()
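Lines 340-436: initialization is the mirror image of teardown: discard any previous state, unhalt the bulk endpoint, reset the DMA-queue parsing state, allocate the URB and transfer-buffer arrays plus one buffer per URB, submit everything, and start capture. A sketch; the dma_q fields reset around lines 362-368 and the exact error strings are partly assumed:

int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
                          int num_bufs, int max_pkt_size,
                          int (*bulk_copy)(struct cx231xx *dev,
                                           struct urb *urb))
{
        struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;   /* line 345 */
        struct urb *urb;
        int i, rc, pipe;
        int sb_size;

        dev_dbg(dev->dev, "called cx231xx_vbi_isoc\n");         /* line 351 */

        /* Tear down any previous run, then unhalt the bulk endpoint
         * (lines 354-359) */
        cx231xx_uninit_vbi_isoc(dev);
        usb_clear_halt(dev->udev,
                       usb_rcvbulkpipe(dev->udev,
                                       dev->vbi_mode.end_point_addr));

        dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
        dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;

        /* Reset the parser state (field list partly assumed) */
        dma_q->pos = 0;
        dma_q->is_partial_line = 0;
        dma_q->last_sav = 0;
        dma_q->current_field = -1;
        dma_q->bytes_left_in_line = dev->width << 1;            /* line 367 */
        dma_q->lines_per_field = (dev->norm & V4L2_STD_625_50) ?
                                 PAL_VBI_LINES : NTSC_VBI_LINES;
        dma_q->lines_completed = 0;

        dev->vbi_mode.bulk_ctl.urb = kcalloc(num_bufs, sizeof(void *),
                                             GFP_KERNEL);
        if (!dev->vbi_mode.bulk_ctl.urb) {
                dev_err(dev->dev, "cannot alloc memory for usb buffers\n");
                return -ENOMEM;
        }

        dev->vbi_mode.bulk_ctl.transfer_buffer =
                kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
        if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
                dev_err(dev->dev, "cannot alloc memory for usb transfer\n");
                kfree(dev->vbi_mode.bulk_ctl.urb);
                return -ENOMEM;
        }

        dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
        dev->vbi_mode.bulk_ctl.buf = NULL;

        sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;

        /* One URB plus one sb_size transfer buffer per slot (397-421) */
        for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
                urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!urb) {
                        cx231xx_uninit_vbi_isoc(dev);
                        return -ENOMEM;
                }
                dev->vbi_mode.bulk_ctl.urb[i] = urb;

                dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
                        kzalloc(sb_size, GFP_KERNEL);
                if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
                        dev_err(dev->dev,
                                "unable to allocate %i bytes for transfer buffer %i\n",
                                sb_size, i);
                        cx231xx_uninit_vbi_isoc(dev);
                        return -ENOMEM;
                }

                pipe = usb_rcvbulkpipe(dev->udev,
                                       dev->vbi_mode.end_point_addr);
                usb_fill_bulk_urb(urb, dev->udev, pipe,
                                  dev->vbi_mode.bulk_ctl.transfer_buffer[i],
                                  sb_size, cx231xx_irq_vbi_callback, dma_q);
        }

        init_waitqueue_head(&dma_q->wq);

        /* Submit all URBs, then start VBI capture on the bridge (426-436) */
        for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
                rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
                if (rc) {
                        dev_err(dev->dev,
                                "submit of urb %i failed (error=%i)\n", i, rc);
                        cx231xx_uninit_vbi_isoc(dev);
                        return rc;
                }
        }

        cx231xx_capture_start(dev, 1, Vbi);
        return 0;
}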
442 u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, in cx231xx_get_vbi_line() argument
467 cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size, in cx231xx_get_vbi_line()
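Lines 442-467: cx231xx_get_vbi_line() maps a SAV code to a field number and forwards the payload to cx231xx_copy_vbi_line() (line 467). A sketch; SAV_VBI_FIELD1/SAV_VBI_FIELD2 stand in for the driver's VBI SAV codes, and the names are assumed:

u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
                         u8 sav_eav, u8 *p_buffer, u32 buffer_size)
{
        u32 bytes_copied = 0;
        int current_field = -1;

        /* Map the SAV code to a field number; anything else is skipped */
        switch (sav_eav) {
        case SAV_VBI_FIELD1:
                current_field = 1;
                break;
        case SAV_VBI_FIELD2:
                current_field = 2;
                break;
        default:
                break;
        }
        if (current_field < 0)
                return bytes_copied;

        dma_q->last_sav = sav_eav;
        bytes_copied = cx231xx_copy_vbi_line(dev, dma_q, p_buffer,
                                             buffer_size, current_field);
        return bytes_copied;
}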
476 static inline void vbi_buffer_filled(struct cx231xx *dev, in vbi_buffer_filled() argument
486 dev->vbi_mode.bulk_ctl.buf = NULL; in vbi_buffer_filled()
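Lines 476-486: vbi_buffer_filled() stamps the buffer, clears the working-buffer pointer (line 486), and completes it toward vb2. A sketch using the standard vb2 completion idiom:

static inline void vbi_buffer_filled(struct cx231xx *dev,
                                     struct cx231xx_dmaqueue *dma_q,
                                     struct cx231xx_buffer *buf)
{
        /* Stamp sequence/timestamp, then hand the buffer back to vb2 */
        buf->vb.sequence = dma_q->sequence++;
        buf->vb.vb2_buf.timestamp = ktime_get_ns();

        dev->vbi_mode.bulk_ctl.buf = NULL;                      /* line 486 */

        list_del(&buf->list);
        vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}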
492 u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, in cx231xx_copy_vbi_line() argument
497 u32 _line_size = dev->width * 2; in cx231xx_copy_vbi_line()
501 cx231xx_reset_vbi_buffer(dev, dma_q); in cx231xx_copy_vbi_line()
508 buf = dev->vbi_mode.bulk_ctl.buf; in cx231xx_copy_vbi_line()
536 cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy); in cx231xx_copy_vbi_line()
547 if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) { in cx231xx_copy_vbi_line()
549 vbi_buffer_filled(dev, dma_q, buf); in cx231xx_copy_vbi_line()
553 cx231xx_reset_vbi_buffer(dev, dma_q); in cx231xx_copy_vbi_line()
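Lines 492-553: the per-line copy engine. It lazily (re)acquires a working buffer (lines 501/508), tracks partial lines across URBs, copies via cx231xx_do_vbi_copy() (line 536), and completes the buffer once both fields are in (lines 547-553). A simplified sketch reconstructed around those hits:

u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
                          u8 *p_line, u32 length, int field_number)
{
        u32 bytes_to_copy;
        struct cx231xx_buffer *buf;
        u32 _line_size = dev->width * 2;                        /* line 497 */

        if (dma_q->current_field == -1)
                cx231xx_reset_vbi_buffer(dev, dma_q);           /* line 501 */

        if (dma_q->field_number != field_number)
                dma_q->lines_completed = 0;

        buf = dev->vbi_mode.bulk_ctl.buf;                       /* line 508 */
        dma_q->current_field = field_number;

        bytes_to_copy = min(dma_q->bytes_left_in_line, length);

        /* Copy only while a buffer is available and the field is not
         * already complete */
        if (buf && dma_q->lines_completed < dma_q->lines_per_field)
                cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);

        dma_q->pos += bytes_to_copy;
        dma_q->bytes_left_in_line -= bytes_to_copy;
        dma_q->is_partial_line = (dma_q->bytes_left_in_line != 0);

        if (dma_q->bytes_left_in_line == 0) {
                /* One full VBI line consumed */
                dma_q->bytes_left_in_line = _line_size;
                dma_q->lines_completed++;
                dma_q->field_number = field_number;

                if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) {
                        vbi_buffer_filled(dev, dma_q, buf);     /* line 549 */
                        dma_q->pos = 0;
                        dma_q->lines_completed = 0;
                        cx231xx_reset_vbi_buffer(dev, dma_q);   /* line 553 */
                }
        }
        return bytes_to_copy;
}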
568 struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode); in get_next_vbi_buf() local
572 dev_err(dev->dev, "No active queue to serve\n"); in get_next_vbi_buf()
573 dev->vbi_mode.bulk_ctl.buf = NULL; in get_next_vbi_buf()
585 dev->vbi_mode.bulk_ctl.buf = *buf; in get_next_vbi_buf()
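Lines 568-585: get_next_vbi_buf() pops the head of the active list (or records that there is nothing to serve, lines 572-573) and publishes it as the working buffer (line 585). A sketch; zeroing the plane before use is assumed:

static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
                                    struct cx231xx_buffer **buf)
{
        struct cx231xx_video_mode *vmode =
                container_of(dma_q, struct cx231xx_video_mode, vidq);
        struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
        char *outp;

        if (list_empty(&dma_q->active)) {
                dev_err(dev->dev, "No active queue to serve\n"); /* 572 */
                dev->vbi_mode.bulk_ctl.buf = NULL;               /* 573 */
                *buf = NULL;
                return;
        }

        /* Take the head of the active list and zero its plane so URB
         * loss shows up as blank lines (zeroing assumed) */
        *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
        outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
        memset(outp, 0, vb2_plane_size(&(*buf)->vb.vb2_buf, 0));

        dev->vbi_mode.bulk_ctl.buf = *buf;                      /* line 585 */
}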
590 void cx231xx_reset_vbi_buffer(struct cx231xx *dev, in cx231xx_reset_vbi_buffer() argument
595 buf = dev->vbi_mode.bulk_ctl.buf; in cx231xx_reset_vbi_buffer()
605 dma_q->bytes_left_in_line = dev->width << 1; in cx231xx_reset_vbi_buffer()
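Lines 590-605: reset picks up a fresh working buffer when none is current and rewinds the per-line accounting, including bytes_left_in_line at line 605. A sketch:

void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
                              struct cx231xx_dmaqueue *dma_q)
{
        struct cx231xx_buffer *buf;

        buf = dev->vbi_mode.bulk_ctl.buf;                       /* line 595 */
        if (buf == NULL) {
                /* No working buffer: try to pick one off the active list */
                get_next_vbi_buf(dma_q, &buf);

                dma_q->pos = 0;
                dma_q->current_field = -1;
        }

        dma_q->bytes_left_in_line = dev->width << 1;            /* line 605 */
        dma_q->lines_completed = 0;
}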
609 int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q, in cx231xx_do_vbi_copy() argument
615 u32 _line_size = dev->width << 1; in cx231xx_do_vbi_copy()
619 buf = dev->vbi_mode.bulk_ctl.buf; in cx231xx_do_vbi_copy()
636 offset += (dev->width * 2 * dma_q->lines_per_field); in cx231xx_do_vbi_copy()
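Lines 609-636: the raw memcpy into the vb2 plane. The destination offset is derived from completed lines plus the partial-line position, and field 2 is placed after all of field 1's lines (line 636). A sketch:

int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
                        u8 *p_buffer, u32 bytes_to_copy)
{
        u8 *p_out_buffer;
        u32 current_line_bytes_copied;
        struct cx231xx_buffer *buf;
        u32 _line_size = dev->width << 1;                       /* line 615 */
        void *startwrite;
        int offset, lencopy;

        buf = dev->vbi_mode.bulk_ctl.buf;                       /* line 619 */
        if (buf == NULL)
                return -EINVAL;

        p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);

        /* How far into the current line we already are */
        current_line_bytes_copied = _line_size - dma_q->bytes_left_in_line;

        offset = (dma_q->lines_completed * _line_size) +
                 current_line_bytes_copied;

        /* Field 2 is stored after all of field 1's lines (line 636) */
        if (dma_q->current_field == 2)
                offset += (dev->width * 2 * dma_q->lines_per_field);

        startwrite = p_out_buffer + offset;
        lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
                  bytes_to_copy : dma_q->bytes_left_in_line;

        memcpy(startwrite, p_buffer, lencopy);
        return 0;
}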
650 u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev, in cx231xx_is_vbi_buffer_done() argument
655 height = ((dev->norm & V4L2_STD_625_50) ? in cx231xx_is_vbi_buffer_done()
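Lines 650-655: a buffer counts as done once a full field's worth of lines has been completed, with the height again keyed off the norm (line 655). A sketch; the exact completion predicate is assumed:

u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
                              struct cx231xx_dmaqueue *dma_q)
{
        u32 height;

        height = (dev->norm & V4L2_STD_625_50) ?
                 PAL_VBI_LINES : NTSC_VBI_LINES;                /* line 655 */

        /* Done once the second field has a full set of lines */
        return (dma_q->lines_completed == height &&
                dma_q->current_field == 2) ? 1 : 0;
}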