// SPDX-License-Identifier: GPL-2.0
#include <media/drv-intf/saa7146_vv.h>

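/* number of bytes captured per VBI line; used below as the pitch and
   per-line byte count for video dma 3 */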
static int vbi_pixel_to_capture = 720 * 2;

static int vbi_workaround(struct saa7146_dev *dev)
{
	struct saa7146_vv *vv = dev->vv_data;

	u32          *cpu;
	dma_addr_t   dma_addr;

	int count = 0;
	int i;

	DECLARE_WAITQUEUE(wait, current);

	DEB_VBI("dev:%p\n", dev);

	/* once again, a bug in the saa7146: the brs acquisition
	   is buggy and especially the BXO-counter does not work
	   as specified. there is this workaround, but please
	   don't let me explain it. ;-) */

	cpu = dma_alloc_coherent(&dev->pci->dev, 4096, &dma_addr, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* setup some basic programming, just for the workaround */
	saa7146_write(dev, BASE_EVEN3,	dma_addr);
	saa7146_write(dev, BASE_ODD3,	dma_addr+vbi_pixel_to_capture);
	saa7146_write(dev, PROT_ADDR3,	dma_addr+4096);
	saa7146_write(dev, PITCH3,	vbi_pixel_to_capture);
	saa7146_write(dev, BASE_PAGE3,	0x0);
	saa7146_write(dev, NUM_LINE_BYTE3, (2<<16)|((vbi_pixel_to_capture)<<0));
	saa7146_write(dev, MC2, MASK_04|MASK_20);

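	/* assemble the RPS1 command list for the workaround; each WRITE_RPS1()
	   appends one instruction word to the rps1 program (dev->d_rps1) that
	   is started further below */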
	/* load brs-control register */
	WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4));
	/* BXO = 1h, BRS to outbound */
	WRITE_RPS1(0xc000008c);
	/* wait for vbi_a or vbi_b */
	if (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags) {
		DEB_D("...using port b\n");
		WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_E_FID_B);
		WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_O_FID_B);
/*
		WRITE_RPS1(CMD_PAUSE | MASK_09);
*/
	} else {
		DEB_D("...using port a\n");
		WRITE_RPS1(CMD_PAUSE | MASK_10);
	}
	/* upload brs */
	WRITE_RPS1(CMD_UPLOAD | MASK_08);
	/* load brs-control register */
	WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4));
	/* BYO = 1, BXO = NQBIL (=1728 for PAL, for NTSC this is 858*2) - NumByte3 (=1440) = 288 */
	WRITE_RPS1(((1728-(vbi_pixel_to_capture)) << 7) | MASK_19);
	/* wait for brs_done */
	WRITE_RPS1(CMD_PAUSE | MASK_08);
	/* upload brs */
	WRITE_RPS1(CMD_UPLOAD | MASK_08);
	/* load video-dma3 NumLines3 and NumBytes3 */
	WRITE_RPS1(CMD_WR_REG | (1 << 8) | (NUM_LINE_BYTE3/4));
	/* dev->vbi_count*2 lines, 720 pixel (= 1440 Bytes) */
	WRITE_RPS1((2 << 16) | (vbi_pixel_to_capture));
	/* load brs-control register */
	WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4));
	/* Set BRS right: note: this is an experimental value for BXO (=> PAL!) */
	WRITE_RPS1((540 << 7) | (5 << 19));  /* 5 == vbi_start */
	/* wait for brs_done */
	WRITE_RPS1(CMD_PAUSE | MASK_08);
	/* upload brs and video-dma3 */
	WRITE_RPS1(CMD_UPLOAD | MASK_08 | MASK_04);
	/* load mc1 register: enable video-dma3 */
	WRITE_RPS1(CMD_WR_REG | (1 << 8) | (MC1/4));
	WRITE_RPS1(MASK_20 | MASK_04);
	/* generate interrupt */
	WRITE_RPS1(CMD_INTERRUPT);
	/* stop rps1 */
	WRITE_RPS1(CMD_STOP);

	/* we have to do the workaround twice to be sure that
	   everything is ok */
	for (i = 0; i < 2; i++) {

		/* indicate to the irq handler that we do the workaround */
		saa7146_write(dev, MC2, MASK_31|MASK_15);

		saa7146_write(dev, NUM_LINE_BYTE3, (1<<16)|(2<<0));
		saa7146_write(dev, MC2, MASK_04|MASK_20);

		/* enable rps1 irqs */
		SAA7146_IER_ENABLE(dev, MASK_28);

		/* prepare to wait to be woken up by the irq-handler */
		add_wait_queue(&vv->vbi_wq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		/* start rps1 to enable workaround */
		saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
		saa7146_write(dev, MC1, (MASK_13 | MASK_29));

		schedule();

		DEB_VBI("brs bug workaround %d/1\n", i);

		remove_wait_queue(&vv->vbi_wq, &wait);
		__set_current_state(TASK_RUNNING);

		/* disable rps1 irqs */
		SAA7146_IER_DISABLE(dev, MASK_28);

		/* stop video-dma3 */
		saa7146_write(dev, MC1, MASK_20);

		if (signal_pending(current)) {

			DEB_VBI("aborted (rps:0x%08x)\n",
				saa7146_read(dev, RPS_ADDR1));

			/* stop rps1 for sure */
			saa7146_write(dev, MC1, MASK_29);

			dma_free_coherent(&dev->pci->dev, 4096, cpu, dma_addr);
			return -EINTR;
		}
	}

	dma_free_coherent(&dev->pci->dev, 4096, cpu, dma_addr);
	return 0;
}

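/* program video dma 3 for the given buffer and build an RPS1 program that
   enables the dma, waits for the odd and even field IDs and then raises an
   interrupt */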
static void saa7146_set_vbi_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next)
{
	struct saa7146_vv *vv = dev->vv_data;

	struct saa7146_video_dma vdma3;

	int count = 0;
	unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_E_FID_A : CMD_E_FID_B;
	unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_O_FID_A : CMD_O_FID_B;

/*
	vdma3.base_even	= 0xc8000000+2560*70;
	vdma3.base_odd	= 0xc8000000;
	vdma3.prot_addr	= 0xc8000000+2560*164;
	vdma3.pitch	= 2560;
	vdma3.base_page	= 0;
	vdma3.num_line_byte = (64<<16)|((vbi_pixel_to_capture)<<0); // set above!
*/
	vdma3.base_even	= buf->pt[2].offset;
	vdma3.base_odd	= buf->pt[2].offset + 16 * vbi_pixel_to_capture;
	vdma3.prot_addr	= buf->pt[2].offset + 16 * 2 * vbi_pixel_to_capture;
	vdma3.pitch	= vbi_pixel_to_capture;
	vdma3.base_page	= buf->pt[2].dma | ME1;
	vdma3.num_line_byte = (16 << 16) | vbi_pixel_to_capture;

	saa7146_write_out_dma(dev, 3, &vdma3);

	/* write beginning of rps-program */
	count = 0;

	/* wait for o_fid_a/b / e_fid_a/b toggle only if bit 1 is not set */

	/* we don't wait here for the first field anymore. this differs from the
	   video capture and might cause the first buffer to be only half filled
	   (with only one field). but since this is some sort of streaming data,
	   this is not that negative. in return, we can use the whole engine from
	   videobuf-dma-sg.c... */

/*
	WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | e_wait);
	WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | o_wait);
*/
	/* set bit 1 */
	WRITE_RPS1(CMD_WR_REG | (1 << 8) | (MC2/4));
	WRITE_RPS1(MASK_28 | MASK_12);

	/* turn on video-dma3 */
	WRITE_RPS1(CMD_WR_REG_MASK | (MC1/4));
	WRITE_RPS1(MASK_04 | MASK_20);			/* => mask */
	WRITE_RPS1(MASK_04 | MASK_20);			/* => values */

	/* wait for o_fid_a/b / e_fid_a/b toggle */
	WRITE_RPS1(CMD_PAUSE | o_wait);
	WRITE_RPS1(CMD_PAUSE | e_wait);

	/* generate interrupt */
	WRITE_RPS1(CMD_INTERRUPT);

	/* stop */
	WRITE_RPS1(CMD_STOP);

	/* enable rps1 irqs */
	SAA7146_IER_ENABLE(dev, MASK_28);

	/* write the address of the rps-program */
	saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);

	/* turn on rps */
	saa7146_write(dev, MC1, (MASK_13 | MASK_29));
}

static int buffer_activate(struct saa7146_dev *dev,
			   struct saa7146_buf *buf,
			   struct saa7146_buf *next)
{
	struct saa7146_vv *vv = dev->vv_data;

	DEB_VBI("dev:%p, buf:%p, next:%p\n", dev, buf, next);
	saa7146_set_vbi_capture(dev, buf, next);

	mod_timer(&vv->vbi_dmaq.timeout, jiffies + BUFFER_TIMEOUT);
	return 0;
}

/* ------------------------------------------------------------------ */

static int queue_setup(struct vb2_queue *q,
		       unsigned int *num_buffers, unsigned int *num_planes,
		       unsigned int sizes[], struct device *alloc_devs[])
{
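	/* one VBI buffer holds 16 lines per field for both fields, each line
	   vbi_pixel_to_capture bytes long */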
	unsigned int size = 16 * 2 * vbi_pixel_to_capture;

	if (*num_planes)
		return sizes[0] < size ? -EINVAL : 0;
	*num_planes = 1;
	sizes[0] = size;

	return 0;
}

static void buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *vq = vb->vb2_queue;
	struct saa7146_dev *dev = vb2_get_drv_priv(vq);
	struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb);
	unsigned long flags;

	spin_lock_irqsave(&dev->slock, flags);

	saa7146_buffer_queue(dev, &dev->vv_data->vbi_dmaq, buf);
	spin_unlock_irqrestore(&dev->slock, flags);
}

static int buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb);
	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
	struct scatterlist *list = sgt->sgl;
	int length = sgt->nents;
	struct vb2_queue *vq = vb->vb2_queue;
	struct saa7146_dev *dev = vb2_get_drv_priv(vq);
	int ret;

	buf->activate = buffer_activate;

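	/* build the saa7146 page table for video dma 3 from the buffer's
	   scatter-gather list */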
	saa7146_pgtable_alloc(dev->pci, &buf->pt[2]);

	ret = saa7146_pgtable_build_single(dev->pci, &buf->pt[2],
					   list, length);
	if (ret)
		saa7146_pgtable_free(dev->pci, &buf->pt[2]);
	return ret;
}

static int buf_prepare(struct vb2_buffer *vb)
{
	unsigned int size = 16 * 2 * vbi_pixel_to_capture;

	if (vb2_plane_size(vb, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, size);
	return 0;
}

static void buf_cleanup(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb);
	struct vb2_queue *vq = vb->vb2_queue;
	struct saa7146_dev *dev = vb2_get_drv_priv(vq);

	saa7146_pgtable_free(dev->pci, &buf->pt[2]);
}

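/* give back the currently active buffer and all queued buffers to vb2 in the
   given state */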
static void return_buffers(struct vb2_queue *q, int state)
{
	struct saa7146_dev *dev = vb2_get_drv_priv(q);
	struct saa7146_dmaqueue *dq = &dev->vv_data->vbi_dmaq;
	struct saa7146_buf *buf;

	if (dq->curr) {
		buf = dq->curr;
		dq->curr = NULL;
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	while (!list_empty(&dq->queue)) {
		buf = list_entry(dq->queue.next, struct saa7146_buf, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
}

static void vbi_stop(struct saa7146_dev *dev)
{
	struct saa7146_vv *vv = dev->vv_data;
	unsigned long flags;

	DEB_VBI("dev:%p\n", dev);

	spin_lock_irqsave(&dev->slock, flags);

	/* disable rps1 */
	saa7146_write(dev, MC1, MASK_29);

	/* disable rps1 irqs */
	SAA7146_IER_DISABLE(dev, MASK_28);

	/* shut down dma 3 transfers */
	saa7146_write(dev, MC1, MASK_20);

	del_timer(&vv->vbi_dmaq.timeout);
	del_timer(&vv->vbi_read_timeout);

	spin_unlock_irqrestore(&dev->slock, flags);
}

static void vbi_read_timeout(struct timer_list *t)
{
	struct saa7146_vv *vv = from_timer(vv, t, vbi_read_timeout);
	struct saa7146_dev *dev = vv->vbi_dmaq.dev;

	DEB_VBI("dev:%p\n", dev);

	vbi_stop(dev);
}

static int vbi_begin(struct saa7146_dev *dev)
{
	struct saa7146_vv *vv = dev->vv_data;
	u32 arbtr_ctrl	= saa7146_read(dev, PCI_BT_V1);
	int ret = 0;

	DEB_VBI("dev:%p\n", dev);

	ret = saa7146_res_get(dev, RESOURCE_DMA3_BRS);
	if (!ret) {
		DEB_S("cannot get vbi RESOURCE_DMA3_BRS resource\n");
		return -EBUSY;
	}

	/* adjust arbitration control for video dma 3 */
	arbtr_ctrl &= ~0x1f0000;
	arbtr_ctrl |=  0x1d0000;
	saa7146_write(dev, PCI_BT_V1, arbtr_ctrl);
	saa7146_write(dev, MC2, (MASK_04|MASK_20));

	vv->vbi_read_timeout.function = vbi_read_timeout;

	/* initialize the brs */
	if (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags) {
		saa7146_write(dev, BRS_CTRL, MASK_30|MASK_29 | (7 << 19));
	} else {
		saa7146_write(dev, BRS_CTRL, 0x00000001);

		ret = vbi_workaround(dev);
		if (ret) {
			DEB_VBI("vbi workaround failed!\n");
			/* return ret;*/
		}
	}

	/* upload brs register */
	saa7146_write(dev, MC2, (MASK_08|MASK_24));
	return 0;
}

static int start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct saa7146_dev *dev = vb2_get_drv_priv(q);
	int ret;

	if (!vb2_is_streaming(&dev->vv_data->vbi_dmaq.q))
		dev->vv_data->seqnr = 0;
	ret = vbi_begin(dev);
	if (ret)
		return_buffers(q, VB2_BUF_STATE_QUEUED);
	return ret;
}

static void stop_streaming(struct vb2_queue *q)
{
	struct saa7146_dev *dev = vb2_get_drv_priv(q);

	vbi_stop(dev);
	return_buffers(q, VB2_BUF_STATE_ERROR);
	saa7146_res_free(dev, RESOURCE_DMA3_BRS);
}

const struct vb2_ops vbi_qops = {
	.queue_setup	= queue_setup,
	.buf_queue	= buf_queue,
	.buf_init	= buf_init,
	.buf_prepare	= buf_prepare,
	.buf_cleanup	= buf_cleanup,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
	.wait_prepare	= vb2_ops_wait_prepare,
	.wait_finish	= vb2_ops_wait_finish,
};

/* ------------------------------------------------------------------ */

static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
{
	DEB_VBI("dev:%p\n", dev);

	INIT_LIST_HEAD(&vv->vbi_dmaq.queue);

	timer_setup(&vv->vbi_dmaq.timeout, saa7146_buffer_timeout, 0);
	vv->vbi_dmaq.dev              = dev;

	init_waitqueue_head(&vv->vbi_wq);
}

static void vbi_irq_done(struct saa7146_dev *dev, unsigned long status)
{
	struct saa7146_vv *vv = dev->vv_data;

	spin_lock(&dev->slock);

	if (vv->vbi_dmaq.curr) {
		DEB_VBI("dev:%p, curr:%p\n", dev, vv->vbi_dmaq.curr);
		saa7146_buffer_finish(dev, &vv->vbi_dmaq, VB2_BUF_STATE_DONE);
	} else {
		DEB_VBI("dev:%p\n", dev);
	}
	saa7146_buffer_next(dev, &vv->vbi_dmaq, 1);

	spin_unlock(&dev->slock);
}

const struct saa7146_use_ops saa7146_vbi_uops = {
	.init		= vbi_init,
	.irq_done	= vbi_irq_done,
};