1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 */
4
5 #include "cx88.h"
6
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/init.h>
10
/* Debug verbosity for this file; settable at module load and via sysfs. */
static unsigned int vbi_debug;
module_param(vbi_debug, int, 0644);
MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");

/* Print a debug message when vbi_debug is at least @level. */
#define dprintk(level, fmt, arg...) do {				\
	if (vbi_debug >= level)						\
		printk(KERN_DEBUG pr_fmt("%s: vbi:" fmt),		\
		       __func__, ##arg);				\
} while (0)
20
21 /* ------------------------------------------------------------------ */
22
cx8800_vbi_fmt(struct file * file,void * priv,struct v4l2_format * f)23 int cx8800_vbi_fmt(struct file *file, void *priv,
24 struct v4l2_format *f)
25 {
26 struct cx8800_dev *dev = video_drvdata(file);
27
28 f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
29 f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
30 f->fmt.vbi.offset = 244;
31
32 if (dev->core->tvnorm & V4L2_STD_525_60) {
33 /* ntsc */
34 f->fmt.vbi.sampling_rate = 28636363;
35 f->fmt.vbi.start[0] = 10;
36 f->fmt.vbi.start[1] = 273;
37 f->fmt.vbi.count[0] = VBI_LINE_NTSC_COUNT;
38 f->fmt.vbi.count[1] = VBI_LINE_NTSC_COUNT;
39
40 } else if (dev->core->tvnorm & V4L2_STD_625_50) {
41 /* pal */
42 f->fmt.vbi.sampling_rate = 35468950;
43 f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
44 f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
45 f->fmt.vbi.count[0] = VBI_LINE_PAL_COUNT;
46 f->fmt.vbi.count[1] = VBI_LINE_PAL_COUNT;
47 }
48 return 0;
49 }
50
/*
 * Program the chip to start VBI capture DMA from @buf's RISC program.
 *
 * Sets up the SRAM FIFO channel, enables VBI capture in the VBOS
 * control register, resets the generic counter used to track completed
 * buffers, unmasks the relevant interrupts and finally kicks capture
 * and DMA.  Always returns 0.
 *
 * NOTE(review): the register write order (FIFO setup before enabling
 * capture/DMA) appears intentional — do not reorder.
 */
static int cx8800_start_vbi_dma(struct cx8800_dev *dev,
				struct cx88_dmaqueue *q,
				struct cx88_buffer *buf)
{
	struct cx88_core *core = dev->core;

	/* setup fifo + format */
	cx88_sram_channel_setup(dev->core, &cx88_sram_channels[SRAM_CH24],
				VBI_LINE_LENGTH, buf->risc.dma);

	cx_write(MO_VBOS_CONTROL, (1 << 18) |	/* comb filter delay fixup */
				  (1 << 15) |	/* enable vbi capture */
				  (1 << 11));

	/* reset counter */
	cx_write(MO_VBI_GPCNTRL, GP_COUNT_CONTROL_RESET);
	q->count = 0;

	/* enable irqs */
	cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_VIDINT);
	cx_set(MO_VID_INTMSK, 0x0f0088);

	/* enable capture */
	cx_set(VID_CAPTURE_CONTROL, 0x18);

	/* start dma */
	cx_set(MO_DEV_CNTRL2, (1 << 5));
	cx_set(MO_VID_DMACNTRL, 0x88);

	return 0;
}
82
/*
 * Halt VBI capture: stop the DMA engine, disable capture and mask the
 * interrupts that cx8800_start_vbi_dma() enabled (mirror of that
 * function, in reverse order).
 */
void cx8800_stop_vbi_dma(struct cx8800_dev *dev)
{
	struct cx88_core *core = dev->core;

	/* stop dma */
	cx_clear(MO_VID_DMACNTRL, 0x88);

	/* disable capture */
	cx_clear(VID_CAPTURE_CONTROL, 0x18);

	/* disable irqs */
	cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
	cx_clear(MO_VID_INTMSK, 0x0f0088);
}
97
cx8800_restart_vbi_queue(struct cx8800_dev * dev,struct cx88_dmaqueue * q)98 int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
99 struct cx88_dmaqueue *q)
100 {
101 struct cx88_buffer *buf;
102
103 if (list_empty(&q->active))
104 return 0;
105
106 buf = list_entry(q->active.next, struct cx88_buffer, list);
107 dprintk(2, "restart_queue [%p/%d]: restart dma\n",
108 buf, buf->vb.vb2_buf.index);
109 cx8800_start_vbi_dma(dev, q, buf);
110 return 0;
111 }
112
113 /* ------------------------------------------------------------------ */
114
queue_setup(struct vb2_queue * q,unsigned int * num_buffers,unsigned int * num_planes,unsigned int sizes[],struct device * alloc_devs[])115 static int queue_setup(struct vb2_queue *q,
116 unsigned int *num_buffers, unsigned int *num_planes,
117 unsigned int sizes[], struct device *alloc_devs[])
118 {
119 struct cx8800_dev *dev = q->drv_priv;
120
121 *num_planes = 1;
122 if (dev->core->tvnorm & V4L2_STD_525_60)
123 sizes[0] = VBI_LINE_NTSC_COUNT * VBI_LINE_LENGTH * 2;
124 else
125 sizes[0] = VBI_LINE_PAL_COUNT * VBI_LINE_LENGTH * 2;
126 return 0;
127 }
128
buffer_prepare(struct vb2_buffer * vb)129 static int buffer_prepare(struct vb2_buffer *vb)
130 {
131 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
132 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
133 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
134 struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
135 unsigned int lines;
136 unsigned int size;
137
138 if (dev->core->tvnorm & V4L2_STD_525_60)
139 lines = VBI_LINE_NTSC_COUNT;
140 else
141 lines = VBI_LINE_PAL_COUNT;
142 size = lines * VBI_LINE_LENGTH * 2;
143 if (vb2_plane_size(vb, 0) < size)
144 return -EINVAL;
145 vb2_set_plane_payload(vb, 0, size);
146
147 return cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
148 0, VBI_LINE_LENGTH * lines,
149 VBI_LINE_LENGTH, 0,
150 lines);
151 }
152
buffer_finish(struct vb2_buffer * vb)153 static void buffer_finish(struct vb2_buffer *vb)
154 {
155 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
156 struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
157 struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
158 struct cx88_riscmem *risc = &buf->risc;
159
160 if (risc->cpu)
161 dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu,
162 risc->dma);
163 memset(risc, 0, sizeof(*risc));
164 }
165
/*
 * vb2 .buf_queue: link a prepared buffer into the active DMA queue.
 *
 * The buffer's RISC program is terminated with a counter-incrementing
 * jump whose target is risc.dma + 8 (back into its own program), so
 * the chip loops on the last buffer if nothing else is queued.  When
 * buffers are already active, the previous tail's jump target is
 * re-pointed at this buffer instead, chaining the programs together,
 * and an IRQ flag is set on this buffer's first instruction.
 *
 * NOTE(review): no locking here — presumably the vb2 core serializes
 * .buf_queue against the interrupt path via dev->slock; confirm
 * against the callers in cx88-video/cx88-core.
 */
static void buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
	struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb);
	struct cx88_buffer *prev;
	struct cx88_dmaqueue *q = &dev->vbiq;

	/* add jump to start */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);

	if (list_empty(&q->active)) {
		/* first buffer: just make it the head of the queue */
		list_add_tail(&buf->list, &q->active);
		dprintk(2, "[%p/%d] vbi_queue - first active\n",
			buf, buf->vb.vb2_buf.index);

	} else {
		/* request an IRQ when this buffer's program starts */
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(q->active.prev, struct cx88_buffer, list);
		list_add_tail(&buf->list, &q->active);
		/* chain: previous tail now jumps into this buffer */
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(2, "[%p/%d] buffer_queue - append to active\n",
			buf, buf->vb.vb2_buf.index);
	}
}
193
start_streaming(struct vb2_queue * q,unsigned int count)194 static int start_streaming(struct vb2_queue *q, unsigned int count)
195 {
196 struct cx8800_dev *dev = q->drv_priv;
197 struct cx88_dmaqueue *dmaq = &dev->vbiq;
198 struct cx88_buffer *buf = list_entry(dmaq->active.next,
199 struct cx88_buffer, list);
200
201 cx8800_start_vbi_dma(dev, dmaq, buf);
202 return 0;
203 }
204
/*
 * vb2 .stop_streaming: halt the hardware and hand every still-queued
 * buffer back to vb2 marked VB2_BUF_STATE_ERROR, as vb2 requires.
 *
 * NOTE(review): the 0x11 / 0x06 masks differ from the 0x88 / 0x18 bits
 * set by cx8800_start_vbi_dma(); they appear to clear the video-path
 * enables as well — confirm against the CX2388x datasheet before
 * changing.
 */
static void stop_streaming(struct vb2_queue *q)
{
	struct cx8800_dev *dev = q->drv_priv;
	struct cx88_core *core = dev->core;	/* used by cx_clear() */
	struct cx88_dmaqueue *dmaq = &dev->vbiq;
	unsigned long flags;

	cx_clear(MO_VID_DMACNTRL, 0x11);
	cx_clear(VID_CAPTURE_CONTROL, 0x06);
	cx8800_stop_vbi_dma(dev);
	/* drain the active list under the IRQ-safe queue lock */
	spin_lock_irqsave(&dev->slock, flags);
	while (!list_empty(&dmaq->active)) {
		struct cx88_buffer *buf = list_entry(dmaq->active.next,
						     struct cx88_buffer, list);

		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
225
/* videobuf2 queue operations for the cx8800 raw VBI capture queue */
const struct vb2_ops cx8800_vbi_qops = {
	.queue_setup	 = queue_setup,
	.buf_prepare	 = buffer_prepare,
	.buf_finish	 = buffer_finish,
	.buf_queue	 = buffer_queue,
	.wait_prepare	 = vb2_ops_wait_prepare,
	.wait_finish	 = vb2_ops_wait_finish,
	.start_streaming = start_streaming,
	.stop_streaming	 = stop_streaming,
};
236