// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Conexant CX23885 PCIe bridge
 *
 * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
 */

#include "cx23885.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>

static unsigned int vbibufs = 4;
module_param(vbibufs, int, 0644);
MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");

static unsigned int vbi_debug;
module_param(vbi_debug, int, 0644);
MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");

#define dprintk(level, fmt, arg...)\
	do { if (vbi_debug >= level)\
		printk(KERN_DEBUG pr_fmt("%s: vbi:" fmt), \
			__func__, ##arg); \
	} while (0)

/* ------------------------------------------------------------------ */

#define VBI_LINE_LENGTH 1440
#define VBI_NTSC_LINE_COUNT 12
#define VBI_PAL_LINE_COUNT 18


int cx23885_vbi_fmt(struct file *file, void *priv,
		    struct v4l2_format *f)
{
	struct cx23885_dev *dev = video_drvdata(file);

	f->fmt.vbi.sampling_rate = 27000000;
	f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
	f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
	f->fmt.vbi.offset = 0;
	f->fmt.vbi.flags = 0;
	if (dev->tvnorm & V4L2_STD_525_60) {
		/* ntsc */
		f->fmt.vbi.start[0] = V4L2_VBI_ITU_525_F1_START + 9;
		f->fmt.vbi.start[1] = V4L2_VBI_ITU_525_F2_START + 9;
		f->fmt.vbi.count[0] = VBI_NTSC_LINE_COUNT;
		f->fmt.vbi.count[1] = VBI_NTSC_LINE_COUNT;
	} else if (dev->tvnorm & V4L2_STD_625_50) {
		/* pal */
		f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
		f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
		f->fmt.vbi.count[0] = VBI_PAL_LINE_COUNT;
		f->fmt.vbi.count[1] = VBI_PAL_LINE_COUNT;
	}

	return 0;
}
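/*
 * Worked example of the geometry this sets up (a sketch for orientation
 * only; the F1/F2 start values assume the usual videodev2.h definitions,
 * 1/264 for 525-line and 1/314 for 625-line systems):
 *
 *   NTSC: start[0] = 10, start[1] = 273, 12 lines per field
 *         -> 2 * 12 * 1440 = 34560 bytes per buffer
 *   PAL:  start[0] = 6,  start[1] = 319, 18 lines per field
 *         -> 2 * 18 * 1440 = 51840 bytes per buffer
 *
 * These per-buffer sizes are exactly what queue_setup() and
 * buffer_prepare() below compute as lines * VBI_LINE_LENGTH * 2.
 */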
/* We're given the Video Interrupt status register.
 * The cx23885_video_irq() func has already validated
 * the potential error bits, we just need to deal with
 * the vbi payload and return an indication of whether
 * we actually processed any payload.
 */
int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status)
{
	u32 count;
	int handled = 0;

	if (status & VID_BC_MSK_VBI_RISCI1) {
		dprintk(1, "%s() VID_BC_MSK_VBI_RISCI1\n", __func__);
		spin_lock(&dev->slock);
		count = cx_read(VBI_A_GPCNT);
		cx23885_video_wakeup(dev, &dev->vbiq, count);
		spin_unlock(&dev->slock);
		handled++;
	}

	return handled;
}

static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
				 struct cx23885_dmaqueue *q,
				 struct cx23885_buffer *buf)
{
	dprintk(1, "%s()\n", __func__);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02],
				   VBI_LINE_LENGTH, buf->risc.dma);

	/* reset counter */
	cx_write(VID_A_VBI_CTRL, 3);
	cx_write(VBI_A_GPCNT_CTL, 3);
	q->count = 0;

	/* enable irq */
	cx23885_irq_add_enable(dev, 0x01);
	cx_set(VID_A_INT_MSK, 0x000022);

	/* start dma */
	cx_set(DEV_CNTRL2, (1<<5));
	cx_set(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */

	return 0;
}

/* ------------------------------------------------------------------ */

static int queue_setup(struct vb2_queue *q,
		       unsigned int *num_buffers, unsigned int *num_planes,
		       unsigned int sizes[], struct device *alloc_devs[])
{
	struct cx23885_dev *dev = q->drv_priv;
	unsigned lines = VBI_PAL_LINE_COUNT;

	if (dev->tvnorm & V4L2_STD_525_60)
		lines = VBI_NTSC_LINE_COUNT;
	*num_planes = 1;
	sizes[0] = lines * VBI_LINE_LENGTH * 2;
	return 0;
}

static int buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
	struct cx23885_buffer *buf = container_of(vbuf,
		struct cx23885_buffer, vb);
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
	unsigned lines = VBI_PAL_LINE_COUNT;

	if (dev->tvnorm & V4L2_STD_525_60)
		lines = VBI_NTSC_LINE_COUNT;

	if (vb2_plane_size(vb, 0) < lines * VBI_LINE_LENGTH * 2)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);

	cx23885_risc_vbibuffer(dev->pci, &buf->risc,
			       sgt->sgl,
			       0, VBI_LINE_LENGTH * lines,
			       VBI_LINE_LENGTH, 0,
			       lines);
	return 0;
}

static void buffer_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_buffer *buf = container_of(vbuf,
		struct cx23885_buffer, vb);

	cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
}

/*
 * The risc program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12
 * (skipping the initial JUMP).
 *
 * This is the risc program of the first buffer to be queued if the active
 * list is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
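/*
 * A rough sketch of one buffer's RISC program layout, for orientation only
 * (the actual write instructions are generated by cx23885_risc_vbibuffer();
 * a JUMP occupies 12 bytes, an opcode word plus a 64-bit target address,
 * which is why "+ 12" skips it):
 *
 *   risc.dma +  0:  JMP risc.dma + 12   initial jump; RISC_IRQ1 is OR'ed
 *                                       into it when this buffer is
 *                                       appended behind another one
 *   risc.dma + 12:  RISC write instructions DMAing the VBI lines
 *   risc.jmp[]:     JMP risc.dma + 12   loops on this buffer until it is
 *                                       re-pointed at the next buffer's
 *                                       risc.dma by buffer_queue()
 */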
static void buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
	struct cx23885_buffer *buf = container_of(vbuf,
		struct cx23885_buffer, vb);
	struct cx23885_buffer *prev;
	struct cx23885_dmaqueue *q = &dev->vbiq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	if (list_empty(&q->active)) {
		spin_lock_irqsave(&dev->slock, flags);
		list_add_tail(&buf->queue, &q->active);
		spin_unlock_irqrestore(&dev->slock, flags);
		dprintk(2, "[%p/%d] vbi_queue - first active\n",
			buf, buf->vb.vb2_buf.index);

	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(q->active.prev, struct cx23885_buffer,
				  queue);
		spin_lock_irqsave(&dev->slock, flags);
		list_add_tail(&buf->queue, &q->active);
		spin_unlock_irqrestore(&dev->slock, flags);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(2, "[%p/%d] buffer_queue - append to active\n",
			buf, buf->vb.vb2_buf.index);
	}
}

static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct cx23885_dev *dev = q->drv_priv;
	struct cx23885_dmaqueue *dmaq = &dev->vbiq;
	struct cx23885_buffer *buf = list_entry(dmaq->active.next,
		struct cx23885_buffer, queue);

	cx23885_start_vbi_dma(dev, dmaq, buf);
	return 0;
}

static void cx23885_stop_streaming(struct vb2_queue *q)
{
	struct cx23885_dev *dev = q->drv_priv;
	struct cx23885_dmaqueue *dmaq = &dev->vbiq;
	unsigned long flags;

	cx_clear(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */
	spin_lock_irqsave(&dev->slock, flags);
	while (!list_empty(&dmaq->active)) {
		struct cx23885_buffer *buf = list_entry(dmaq->active.next,
			struct cx23885_buffer, queue);

		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}


const struct vb2_ops cx23885_vbi_qops = {
	.queue_setup = queue_setup,
	.buf_prepare = buffer_prepare,
	.buf_finish = buffer_finish,
	.buf_queue = buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = cx23885_start_streaming,
	.stop_streaming = cx23885_stop_streaming,
};
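/*
 * For context, a hypothetical sketch of how this ops table might be wired
 * into a vb2_queue at probe time elsewhere in the driver. The queue pointer
 * name and the io_modes/gfp_flags/min_buffers_needed/lock values are
 * assumptions for illustration; only drv_priv, buf_struct_size, ops and the
 * dma-sg mem_ops follow from this file:
 *
 *	struct vb2_queue *q = &dev->vb2_vbiq;
 *
 *	q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
 *	q->gfp_flags = GFP_DMA32;
 *	q->min_buffers_needed = 2;
 *	q->drv_priv = dev;
 *	q->buf_struct_size = sizeof(struct cx23885_buffer);
 *	q->ops = &cx23885_vbi_qops;
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &dev->lock;
 *	q->dev = &dev->pci->dev;
 *	err = vb2_queue_init(q);
 */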