/*
 * Driver for the Conexant CX23885 PCIe bridge
 *
 * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>

#include "cx23885.h"

static unsigned int vbibufs = 4;
module_param(vbibufs, int, 0644);
MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");

static unsigned int vbi_debug;
module_param(vbi_debug, int, 0644);
MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");

#define dprintk(level, fmt, arg...)\
	do { if (vbi_debug >= level)\
		printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
	} while (0)

/* ------------------------------------------------------------------ */

#define VBI_LINE_LENGTH 1440
#define VBI_NTSC_LINE_COUNT 12
#define VBI_PAL_LINE_COUNT 18

int cx23885_vbi_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct cx23885_dev *dev = video_drvdata(file);

	f->fmt.vbi.sampling_rate = 27000000;
	f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
	f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
	f->fmt.vbi.offset = 0;
	f->fmt.vbi.flags = 0;
	if (dev->tvnorm & V4L2_STD_525_60) {
		/* ntsc */
		f->fmt.vbi.start[0] = V4L2_VBI_ITU_525_F1_START + 9;
		f->fmt.vbi.start[1] = V4L2_VBI_ITU_525_F2_START + 9;
		f->fmt.vbi.count[0] = VBI_NTSC_LINE_COUNT;
		f->fmt.vbi.count[1] = VBI_NTSC_LINE_COUNT;
	} else if (dev->tvnorm & V4L2_STD_625_50) {
		/* pal */
		f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
		f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
		f->fmt.vbi.count[0] = VBI_PAL_LINE_COUNT;
		f->fmt.vbi.count[1] = VBI_PAL_LINE_COUNT;
	}

	return 0;
}
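
/*
 * Buffer sizing, derived from the constants above: each field carries
 * VBI_NTSC_LINE_COUNT (12) or VBI_PAL_LINE_COUNT (18) lines of
 * VBI_LINE_LENGTH (1440) greyscale samples, and a capture buffer holds
 * both fields.  That works out to 2 * 12 * 1440 = 34560 bytes for NTSC
 * and 2 * 18 * 1440 = 51840 bytes for PAL, matching the size computed
 * in queue_setup() and checked in buffer_prepare() below.
 */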

/* We're given the Video Interrupt status register.
 * The cx23885_video_irq() func has already validated
 * the potential error bits, so we just need to deal
 * with the vbi payload and return an indication of
 * whether we actually processed any payload.
 */
int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status)
{
	u32 count;
	int handled = 0;

	if (status & VID_BC_MSK_VBI_RISCI1) {
		dprintk(1, "%s() VID_BC_MSK_VBI_RISCI1\n", __func__);
		spin_lock(&dev->slock);
		count = cx_read(VBI_A_GPCNT);
		cx23885_video_wakeup(dev, &dev->vbiq, count);
		spin_unlock(&dev->slock);
		handled++;
	}

	return handled;
}

static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
				 struct cx23885_dmaqueue *q,
				 struct cx23885_buffer *buf)
{
	dprintk(1, "%s()\n", __func__);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02],
				   VBI_LINE_LENGTH, buf->risc.dma);

	/* reset counter */
	cx_write(VID_A_VBI_CTRL, 3);
	cx_write(VBI_A_GPCNT_CTL, 3);
	q->count = 0;

	/* enable irq */
	cx23885_irq_add_enable(dev, 0x01);
	cx_set(VID_A_INT_MSK, 0x000022);

	/* start dma */
	cx_set(DEV_CNTRL2, (1<<5));
	cx_set(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */

	return 0;
}

/* ------------------------------------------------------------------ */

static int queue_setup(struct vb2_queue *q,
		       unsigned int *num_buffers, unsigned int *num_planes,
		       unsigned int sizes[], struct device *alloc_devs[])
{
	struct cx23885_dev *dev = q->drv_priv;
	unsigned lines = VBI_PAL_LINE_COUNT;

	if (dev->tvnorm & V4L2_STD_525_60)
		lines = VBI_NTSC_LINE_COUNT;
	*num_planes = 1;
	sizes[0] = lines * VBI_LINE_LENGTH * 2;
	return 0;
}

static int buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
	struct cx23885_buffer *buf = container_of(vbuf,
		struct cx23885_buffer, vb);
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
	unsigned lines = VBI_PAL_LINE_COUNT;

	if (dev->tvnorm & V4L2_STD_525_60)
		lines = VBI_NTSC_LINE_COUNT;

	if (vb2_plane_size(vb, 0) < lines * VBI_LINE_LENGTH * 2)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);

	cx23885_risc_vbibuffer(dev->pci, &buf->risc,
			       sgt->sgl,
			       0, VBI_LINE_LENGTH * lines,
			       VBI_LINE_LENGTH, 0,
			       lines);
	return 0;
}

static void buffer_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_buffer *buf = container_of(vbuf,
		struct cx23885_buffer, vb);

	cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
}
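
/*
 * Per-buffer RISC program layout assumed by buffer_prepare() and
 * buffer_queue() below (a sketch; the actual instruction stream is
 * generated by cx23885_risc_vbibuffer()):
 *
 *   risc.cpu[0]   initial JUMP opcode (RISC_IRQ1 is OR'ed in when the
 *                 buffer is appended to a non-empty queue)
 *   risc.cpu[1]   jump target, risc.dma + 12, i.e. just past this header
 *   ...           RISC write instructions that DMA the VBI lines
 *   risc.jmp[0]   trailing JUMP | RISC_CNT_INC
 *   risc.jmp[1]   jump target, risc.dma + 12, or the start of the next
 *                 buffer once one is chained in
 *   risc.jmp[2]   upper 32 bits of the jump address (0)
 */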

/*
 * The risc program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12
 * (skipping the initial JUMP).
 *
 * This is the risc program of the first buffer to be queued if the active list
 * is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
static void buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
	struct cx23885_buffer *buf = container_of(vbuf,
			struct cx23885_buffer, vb);
	struct cx23885_buffer *prev;
	struct cx23885_dmaqueue *q = &dev->vbiq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	if (list_empty(&q->active)) {
		spin_lock_irqsave(&dev->slock, flags);
		list_add_tail(&buf->queue, &q->active);
		spin_unlock_irqrestore(&dev->slock, flags);
		dprintk(2, "[%p/%d] vbi_queue - first active\n",
			buf, buf->vb.vb2_buf.index);

	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(q->active.prev, struct cx23885_buffer,
				  queue);
		spin_lock_irqsave(&dev->slock, flags);
		list_add_tail(&buf->queue, &q->active);
		spin_unlock_irqrestore(&dev->slock, flags);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(2, "[%p/%d] buffer_queue - append to active\n",
			buf, buf->vb.vb2_buf.index);
	}
}

static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct cx23885_dev *dev = q->drv_priv;
	struct cx23885_dmaqueue *dmaq = &dev->vbiq;
	struct cx23885_buffer *buf = list_entry(dmaq->active.next,
			struct cx23885_buffer, queue);

	cx23885_start_vbi_dma(dev, dmaq, buf);
	return 0;
}

static void cx23885_stop_streaming(struct vb2_queue *q)
{
	struct cx23885_dev *dev = q->drv_priv;
	struct cx23885_dmaqueue *dmaq = &dev->vbiq;
	unsigned long flags;

	cx_clear(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */
	spin_lock_irqsave(&dev->slock, flags);
	while (!list_empty(&dmaq->active)) {
		struct cx23885_buffer *buf = list_entry(dmaq->active.next,
			struct cx23885_buffer, queue);

		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}

struct vb2_ops cx23885_vbi_qops = {
	.queue_setup     = queue_setup,
	.buf_prepare     = buffer_prepare,
	.buf_finish      = buffer_finish,
	.buf_queue       = buffer_queue,
	.wait_prepare    = vb2_ops_wait_prepare,
	.wait_finish     = vb2_ops_wait_finish,
	.start_streaming = cx23885_start_streaming,
	.stop_streaming  = cx23885_stop_streaming,
};
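
/*
 * Sketch of how cx23885_vbi_qops is typically wired into a vb2_queue at
 * device-init time.  The real setup lives in the driver's video setup
 * code, not in this file; dev->vb2_vbiq and dev->lock are illustrative
 * names, while the queue fields themselves follow the standard vb2 API:
 *
 *	struct vb2_queue *q = &dev->vb2_vbiq;
 *
 *	q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
 *	q->drv_priv = dev;
 *	q->buf_struct_size = sizeof(struct cx23885_buffer);
 *	q->ops = &cx23885_vbi_qops;
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &dev->lock;
 *	err = vb2_queue_init(q);
 */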