// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <media/drv-intf/saa7146_vv.h>
#include <linux/module.h>

/****************************************************************************/
/* resource management functions, shamelessly stolen from saa7134 driver */

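/*
 * vv->resources is a bitmask of hardware units (DMA paths etc.) that are
 * currently claimed. saa7146_res_get() grabs a bit, saa7146_res_free()
 * releases it again. A minimal usage sketch; the RESOURCE_* mask shown is
 * only an example, see saa7146_vv.h for the actual definitions:
 *
 *	if (!saa7146_res_get(dev, RESOURCE_DMA1_HPS))
 *		return -EBUSY;
 *	...program the capture DMA...
 *	saa7146_res_free(dev, RESOURCE_DMA1_HPS);
 */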
int saa7146_res_get(struct saa7146_dev *dev, unsigned int bit)
{
	struct saa7146_vv *vv = dev->vv_data;

	if (vv->resources & bit) {
		DEB_D("already allocated! want: 0x%02x, cur:0x%02x\n",
		      bit, vv->resources);
		/* have it already allocated */
		return 1;
	}

	/* it's free, grab it */
	vv->resources |= bit;
	DEB_D("res: get 0x%02x, cur:0x%02x\n", bit, vv->resources);
	return 1;
}

void saa7146_res_free(struct saa7146_dev *dev, unsigned int bits)
{
	struct saa7146_vv *vv = dev->vv_data;

	WARN_ON((vv->resources & bits) != bits);

	vv->resources &= ~bits;
	DEB_D("res: put 0x%02x, cur:0x%02x\n", bits, vv->resources);
}

/********************************************************************************/
/* common buffer functions */

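/*
 * Each capture path owns a struct saa7146_dmaqueue: q->curr is the buffer
 * the hardware is currently filling, q->queue holds the buffers waiting
 * behind it. A typical interrupt-side sequence, sketched here only as an
 * illustration (the real handlers live in the video/vbi specific code),
 * looks like:
 *
 *	spin_lock(&dev->slock);
 *	saa7146_buffer_finish(dev, q, VB2_BUF_STATE_DONE);
 *	saa7146_buffer_next(dev, q, 0);
 *	spin_unlock(&dev->slock);
 */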
int saa7146_buffer_queue(struct saa7146_dev *dev,
			 struct saa7146_dmaqueue *q,
			 struct saa7146_buf *buf)
{
	assert_spin_locked(&dev->slock);
	DEB_EE("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf);

	if (WARN_ON(!q))
		return -EIO;

	if (!q->curr) {
		q->curr = buf;
		DEB_D("immediately activating buffer %p\n", buf);
		buf->activate(dev, buf, NULL);
	} else {
		list_add_tail(&buf->list, &q->queue);
		DEB_D("adding buffer %p to queue. (active buffer present)\n",
		      buf);
	}
	return 0;
}

void saa7146_buffer_finish(struct saa7146_dev *dev,
			   struct saa7146_dmaqueue *q,
			   int state)
{
	struct saa7146_vv *vv = dev->vv_data;
	struct saa7146_buf *buf = q->curr;

	assert_spin_locked(&dev->slock);
	DEB_EE("dev:%p, dmaq:%p, state:%d\n", dev, q, state);
	DEB_EE("q->curr:%p\n", q->curr);

	/* finish current buffer */
	if (!buf) {
		DEB_D("aiii. no current buffer\n");
		return;
	}

	q->curr = NULL;
	buf->vb.vb2_buf.timestamp = ktime_get_ns();
	if (vv->video_fmt.field == V4L2_FIELD_ALTERNATE)
		buf->vb.field = vv->last_field;
	else if (vv->video_fmt.field == V4L2_FIELD_ANY)
		buf->vb.field = (vv->video_fmt.height > vv->standard->v_max_out / 2)
			? V4L2_FIELD_INTERLACED
			: V4L2_FIELD_BOTTOM;
	else
		buf->vb.field = vv->video_fmt.field;
	buf->vb.sequence = vv->seqnr++;
	vb2_buffer_done(&buf->vb.vb2_buf, state);
}

void saa7146_buffer_next(struct saa7146_dev *dev,
			 struct saa7146_dmaqueue *q, int vbi)
{
	struct saa7146_buf *buf, *next = NULL;

	if (WARN_ON(!q))
		return;

	DEB_INT("dev:%p, dmaq:%p, vbi:%d\n", dev, q, vbi);

	assert_spin_locked(&dev->slock);
	if (!list_empty(&q->queue)) {
		/* activate next one from queue */
		buf = list_entry(q->queue.next, struct saa7146_buf, list);
		list_del(&buf->list);
		if (!list_empty(&q->queue))
			next = list_entry(q->queue.next, struct saa7146_buf, list);
		q->curr = buf;
		DEB_INT("next buffer: buf:%p, prev:%p, next:%p\n",
			buf, q->queue.prev, q->queue.next);
		buf->activate(dev, buf, next);
	} else {
		DEB_INT("no next buffer. stopping.\n");
		if (vbi) {
			/* turn off video-dma3 */
			saa7146_write(dev, MC1, MASK_20);
		} else {
			/* nothing to do -- just prevent next video-dma1 transfer
			   by lowering the protection address */

			// fixme: fix this for vflip != 0

			saa7146_write(dev, PROT_ADDR1, 0);
			saa7146_write(dev, MC2, (MASK_02 | MASK_18));

			/* write the address of the rps-program */
			saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle);
			/* turn on rps */
			saa7146_write(dev, MC1, (MASK_12 | MASK_28));

/*
			printk("vdma%d.base_even:     0x%08x\n", 1, saa7146_read(dev, BASE_EVEN1));
			printk("vdma%d.base_odd:      0x%08x\n", 1, saa7146_read(dev, BASE_ODD1));
			printk("vdma%d.prot_addr:     0x%08x\n", 1, saa7146_read(dev, PROT_ADDR1));
			printk("vdma%d.base_page:     0x%08x\n", 1, saa7146_read(dev, BASE_PAGE1));
			printk("vdma%d.pitch:         0x%08x\n", 1, saa7146_read(dev, PITCH1));
			printk("vdma%d.num_line_byte: 0x%08x\n", 1, saa7146_read(dev, NUM_LINE_BYTE1));
*/
		}
		del_timer(&q->timeout);
	}
}

void saa7146_buffer_timeout(struct timer_list *t)
{
	struct saa7146_dmaqueue *q = from_timer(q, t, timeout);
	struct saa7146_dev *dev = q->dev;
	unsigned long flags;

	DEB_EE("dev:%p, dmaq:%p\n", dev, q);

	spin_lock_irqsave(&dev->slock, flags);
	if (q->curr) {
		DEB_D("timeout on %p\n", q->curr);
		saa7146_buffer_finish(dev, q, VB2_BUF_STATE_ERROR);
	}

	/* we don't restart the transfer here like other drivers do: when a
	   streaming capture is disabled, the timeout function is called for
	   the current buffer, and activating the next buffer at that point
	   would mess up our capture logic. if a timeout occurs on any other
	   buffer, something was already seriously broken before, so there is
	   no point in queueing the next capture here either. */

	saa7146_buffer_next(dev, q, 0);

	spin_unlock_irqrestore(&dev->slock, flags);
}

/********************************************************************************/
/* file operations */

static ssize_t fops_write(struct file *file, const char __user *data,
			  size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct saa7146_dev *dev = video_drvdata(file);
	int ret;

	if (vdev->vfl_type != VFL_TYPE_VBI || !dev->ext_vv_data->vbi_fops.write)
		return -EINVAL;
	if (mutex_lock_interruptible(vdev->lock))
		return -ERESTARTSYS;
	ret = dev->ext_vv_data->vbi_fops.write(file, data, count, ppos);
	mutex_unlock(vdev->lock);
	return ret;
}

static const struct v4l2_file_operations video_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.write		= fops_write,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};

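/*
 * vv_callback() is hooked up as dev->vv_callback in saa7146_vv_init() and is
 * called from the saa7146 core interrupt code: MASK_27 signals an RPS0
 * (video) interrupt, MASK_28 an RPS1 (vbi) interrupt, and the work is
 * forwarded to the matching irq_done() handler.
 */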
static void vv_callback(struct saa7146_dev *dev, unsigned long status)
{
	u32 isr = status;

	DEB_INT("dev:%p, isr:0x%08x\n", dev, (u32)status);

	if (isr & MASK_27) {
		DEB_INT("irq: RPS0 (0x%08x)\n", isr);
		saa7146_video_uops.irq_done(dev, isr);
	}

	if (isr & MASK_28) {
		u32 mc2 = saa7146_read(dev, MC2);

		if (mc2 & MASK_15) {
			DEB_INT("irq: RPS1 vbi workaround (0x%08x)\n", isr);
			wake_up(&dev->vv_data->vbi_wq);
			saa7146_write(dev, MC2, MASK_31);
			return;
		}
		DEB_INT("irq: RPS1 (0x%08x)\n", isr);
		saa7146_vbi_uops.irq_done(dev, isr);
	}
}

static const struct v4l2_ctrl_ops saa7146_ctrl_ops = {
	.s_ctrl = saa7146_s_ctrl,
};

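/*
 * saa7146_vv_init() and saa7146_register_device() are the entry points used
 * by the extension drivers (mxb, hexium, ...). A rough sketch of an attach
 * path; the foo_* names of the extension's own structures are purely
 * hypothetical:
 *
 *	static int foo_attach(struct saa7146_dev *dev)
 *	{
 *		int err = saa7146_vv_init(dev, &foo_ext_vv_data);
 *
 *		if (err)
 *			return err;
 *		err = saa7146_register_device(&foo_video_dev, dev, "foo",
 *					      VFL_TYPE_VIDEO);
 *		if (err)
 *			saa7146_vv_release(dev);
 *		return err;
 *	}
 */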
int saa7146_vv_init(struct saa7146_dev *dev, struct saa7146_ext_vv *ext_vv)
{
	struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
	struct v4l2_pix_format *fmt;
	struct v4l2_vbi_format *vbi;
	struct saa7146_vv *vv;
	int err;

	err = v4l2_device_register(&dev->pci->dev, &dev->v4l2_dev);
	if (err)
		return err;

	v4l2_ctrl_handler_init(hdl, 6);
	v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
			  V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
			  V4L2_CID_CONTRAST, 0, 127, 1, 64);
	v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
			  V4L2_CID_SATURATION, 0, 127, 1, 64);
	v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
			  V4L2_CID_VFLIP, 0, 1, 1, 0);
	v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
			  V4L2_CID_HFLIP, 0, 1, 1, 0);
	if (hdl->error) {
		err = hdl->error;
		v4l2_ctrl_handler_free(hdl);
		v4l2_device_unregister(&dev->v4l2_dev);
		return err;
	}
	dev->v4l2_dev.ctrl_handler = hdl;

	vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL);
	if (!vv) {
		ERR("out of memory. aborting.\n");
		v4l2_ctrl_handler_free(hdl);
		v4l2_device_unregister(&dev->v4l2_dev);
		return -ENOMEM;
	}
	ext_vv->vid_ops = saa7146_video_ioctl_ops;
	ext_vv->vbi_ops = saa7146_vbi_ioctl_ops;
	ext_vv->core_ops = &saa7146_video_ioctl_ops;

	DEB_EE("dev:%p\n", dev);

	/* set default values for video parts of the saa7146 */
	saa7146_write(dev, BCS_CTRL, 0x80400040);

	/* enable video-port pins */
	saa7146_write(dev, MC1, (MASK_10 | MASK_26));

	/* save per-device extension data (one extension can
	   handle different devices that might need different
	   configuration data) */
	dev->ext_vv_data = ext_vv;

	saa7146_video_uops.init(dev, vv);
	if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
		saa7146_vbi_uops.init(dev, vv);

	fmt = &vv->video_fmt;
	fmt->width = 384;
	fmt->height = 288;
	fmt->pixelformat = V4L2_PIX_FMT_BGR24;
	fmt->field = V4L2_FIELD_INTERLACED;
	fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
	fmt->bytesperline = 3 * fmt->width;
	fmt->sizeimage = fmt->bytesperline * fmt->height;

	vbi = &vv->vbi_fmt;
	vbi->sampling_rate	= 27000000;
	vbi->offset		= 248; /* todo */
	vbi->samples_per_line	= 720 * 2;
	vbi->sample_format	= V4L2_PIX_FMT_GREY;

	/* fixme: this only works for PAL */
	vbi->start[0] = 5;
	vbi->count[0] = 16;
	vbi->start[1] = 312;
	vbi->count[1] = 16;

	timer_setup(&vv->vbi_read_timeout, NULL, 0);

	dev->vv_data = vv;
	dev->vv_callback = &vv_callback;

	return 0;
}
EXPORT_SYMBOL_GPL(saa7146_vv_init);

int saa7146_vv_release(struct saa7146_dev *dev)
{
	struct saa7146_vv *vv = dev->vv_data;

	DEB_EE("dev:%p\n", dev);

	v4l2_device_unregister(&dev->v4l2_dev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	kfree(vv);
	dev->vv_data = NULL;
	dev->vv_callback = NULL;

	return 0;
}
EXPORT_SYMBOL_GPL(saa7146_vv_release);

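/*
 * saa7146_register_device() fills in the video_device and its vb2 queue for
 * either the video or the vbi node and registers it. The device_caps the
 * extension passed in via ext_vv->capabilities are trimmed so that the video
 * node does not advertise VBI capabilities and vice versa.
 */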
int saa7146_register_device(struct video_device *vfd, struct saa7146_dev *dev,
			    char *name, int type)
{
	struct vb2_queue *q;
	int err;
	int i;

	DEB_EE("dev:%p, name:'%s', type:%d\n", dev, name, type);

	vfd->fops = &video_fops;
	if (type == VFL_TYPE_VIDEO) {
		vfd->ioctl_ops = &dev->ext_vv_data->vid_ops;
		q = &dev->vv_data->video_dmaq.q;
	} else {
		vfd->ioctl_ops = &dev->ext_vv_data->vbi_ops;
		q = &dev->vv_data->vbi_dmaq.q;
	}
	vfd->release = video_device_release_empty;
	vfd->lock = &dev->v4l2_lock;
	vfd->v4l2_dev = &dev->v4l2_dev;
	vfd->tvnorms = 0;
	for (i = 0; i < dev->ext_vv_data->num_stds; i++)
		vfd->tvnorms |= dev->ext_vv_data->stds[i].id;
	strscpy(vfd->name, name, sizeof(vfd->name));
	vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE |
			   V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
	vfd->device_caps |= dev->ext_vv_data->capabilities;
	if (type == VFL_TYPE_VIDEO) {
		vfd->device_caps &=
			~(V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_OUTPUT);
	} else if (vfd->device_caps & V4L2_CAP_SLICED_VBI_OUTPUT) {
		vfd->vfl_dir = VFL_DIR_TX;
		vfd->device_caps &= ~(V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
				      V4L2_CAP_AUDIO | V4L2_CAP_TUNER);
	} else {
		vfd->device_caps &= ~V4L2_CAP_VIDEO_CAPTURE;
	}

	q->type = type == VFL_TYPE_VIDEO ? V4L2_BUF_TYPE_VIDEO_CAPTURE : V4L2_BUF_TYPE_VBI_CAPTURE;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
	q->ops = type == VFL_TYPE_VIDEO ? &video_qops : &vbi_qops;
	q->mem_ops = &vb2_dma_sg_memops;
	q->drv_priv = dev;
	q->gfp_flags = __GFP_DMA32;
	q->buf_struct_size = sizeof(struct saa7146_buf);
	q->lock = &dev->v4l2_lock;
	q->min_buffers_needed = 2;
	q->dev = &dev->pci->dev;
	err = vb2_queue_init(q);
	if (err)
		return err;
	vfd->queue = q;

	video_set_drvdata(vfd, dev);

	err = video_register_device(vfd, type, -1);
	if (err < 0) {
		ERR("cannot register v4l2 device. skipping.\n");
		return err;
	}

	pr_info("%s: registered device %s [v4l2]\n",
		dev->name, video_device_node_name(vfd));
	return 0;
}
EXPORT_SYMBOL_GPL(saa7146_register_device);

int saa7146_unregister_device(struct video_device *vfd, struct saa7146_dev *dev)
{
	DEB_EE("dev:%p\n", dev);

	video_unregister_device(vfd);
	return 0;
}
EXPORT_SYMBOL_GPL(saa7146_unregister_device);

static int __init saa7146_vv_init_module(void)
{
	return 0;
}

static void __exit saa7146_vv_cleanup_module(void)
{
}

module_init(saa7146_vv_init_module);
module_exit(saa7146_vv_cleanup_module);

MODULE_AUTHOR("Michael Hunold <michael@mihu.de>");
MODULE_DESCRIPTION("video4linux driver for saa7146-based hardware");
MODULE_LICENSE("GPL");