/*
 * Xilinx Video DMA
 *
 * Copyright (C) 2013-2015 Ideas on Board
 * Copyright (C) 2013-2015 Xilinx, Inc.
 *
 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma/xilinx_dma.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

#include "xilinx-dma.h"
#include "xilinx-vip.h"
#include "xilinx-vipp.h"

#define XVIP_DMA_DEF_FORMAT		V4L2_PIX_FMT_YUYV
#define XVIP_DMA_DEF_WIDTH		1920
#define XVIP_DMA_DEF_HEIGHT		1080

/* Minimum and maximum widths are expressed in bytes */
#define XVIP_DMA_MIN_WIDTH		1U
#define XVIP_DMA_MAX_WIDTH		65535U
#define XVIP_DMA_MIN_HEIGHT		1U
#define XVIP_DMA_MAX_HEIGHT		8191U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

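/**
 * xvip_dma_remote_subdev - Find the remote subdev connected to a pad
 * @local: The local pad
 * @pad: If not NULL, filled with the index of the remote pad
 *
 * Return: The V4L2 subdevice at the remote end of the link connected to
 * @local, or NULL if the pad is not linked to a subdevice.
 */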
static struct v4l2_subdev *
xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(local);
	if (remote == NULL ||
	    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

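/**
 * xvip_dma_verify_format - Verify the format configured on the DMA engine
 * @dma: The DMA engine
 *
 * Compare the active format on the remote subdev pad with the format currently
 * configured on the DMA engine.
 *
 * Return: 0 if the media bus code, width, height and colorspace match, -EPIPE
 * if the video node isn't connected to a subdevice, -EINVAL on mismatch, or
 * the error returned by the subdev get_fmt operation.
 */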
static int xvip_dma_verify_format(struct xvip_dma *dma)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	int ret;

	subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
	if (subdev == NULL)
		return -EPIPE;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (dma->fmtinfo->code != fmt.format.code ||
	    dma->format.height != fmt.format.height ||
	    dma->format.width != fmt.format.width ||
	    dma->format.colorspace != fmt.format.colorspace)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Stream Management
 */

/**
 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
 * @pipe: The pipeline
 * @start: Start (when true) or stop (when false) the pipeline
 *
 * Walk the entity chain starting at the pipeline output video node and start
 * or stop all of them.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
 */
static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
{
	struct xvip_dma *dma = pipe->output;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	entity = &dma->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_entity_remote_pad(pad);
		if (pad == NULL ||
		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
			break;

		entity = pad->entity;
		subdev = media_entity_to_v4l2_subdev(entity);

		ret = v4l2_subdev_call(subdev, video, s_stream, start);
		if (start && ret < 0 && ret != -ENOIOCTLCMD)
			return ret;
	}

	return 0;
}

/**
 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
 * @pipe: The pipeline
 * @on: Turn the stream on when true or off when false
 *
 * The pipeline is shared between all DMA engines connected to its input and
 * output. While the stream state of DMA engines can be controlled
 * independently, pipelines have a shared stream state that enables or disables
 * all entities in the pipeline. For this reason the pipeline uses a streaming
 * counter that tracks the number of DMA engines that have requested the stream
 * to be enabled.
 *
 * When called with the @on argument set to true, this function will increment
 * the pipeline streaming count. If the streaming count reaches the number of
 * DMA engines in the pipeline it will enable all entities that belong to the
 * pipeline.
 *
 * Similarly, when called with the @on argument set to false, this function will
 * decrement the pipeline streaming count and disable all entities in the
 * pipeline when the streaming count reaches zero.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 * not updated when the operation fails.
 */
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
	int ret = 0;

	mutex_lock(&pipe->lock);

	if (on) {
		if (pipe->stream_count == pipe->num_dmas - 1) {
			ret = xvip_pipeline_start_stop(pipe, true);
			if (ret < 0)
				goto done;
		}
		pipe->stream_count++;
	} else {
		if (--pipe->stream_count == 0)
			xvip_pipeline_start_stop(pipe, false);
	}

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

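/**
 * xvip_pipeline_validate - Validate the pipeline topology
 * @pipe: The pipeline
 * @start: The DMA engine the graph walk starts from
 *
 * Walk the media graph to count the DMA video nodes that belong to the
 * pipeline and record its output. A valid pipeline has exactly one output and
 * at most one input.
 *
 * Return: 0 if the pipeline is valid, or -EPIPE otherwise.
 */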
static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
				  struct xvip_dma *start)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &start->video.entity;
	struct media_device *mdev = entity->parent;
	unsigned int num_inputs = 0;
	unsigned int num_outputs = 0;

	mutex_lock(&mdev->graph_mutex);

	/* Walk the graph to locate the video nodes. */
	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		struct xvip_dma *dma;

		if (entity->type != MEDIA_ENT_T_DEVNODE_V4L)
			continue;

		dma = to_xvip_dma(media_entity_to_video_device(entity));

		if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
			pipe->output = dma;
			num_outputs++;
		} else {
			num_inputs++;
		}
	}

	mutex_unlock(&mdev->graph_mutex);

	/* We need exactly one output and zero or one input. */
	if (num_outputs != 1 || num_inputs > 1)
		return -EPIPE;

	pipe->num_dmas = num_inputs + num_outputs;

	return 0;
}

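/* Reset the pipeline state. Must be called with the pipeline lock held. */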
static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	pipe->num_dmas = 0;
	pipe->output = NULL;
}

/**
 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 * @pipe: the pipeline
 *
 * Decrease the pipeline use count and clean it up if we were the last user.
 */
static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	mutex_lock(&pipe->lock);

	/* If we're the last user clean up the pipeline. */
	if (--pipe->use_count == 0)
		__xvip_pipeline_cleanup(pipe);

	mutex_unlock(&pipe->lock);
}

/**
 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 * @pipe: the pipeline
 * @dma: DMA engine at one end of the pipeline
 *
 * Validate the pipeline if no user exists yet, otherwise just increase the use
 * count.
 *
 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 */
static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
				 struct xvip_dma *dma)
{
	int ret;

	mutex_lock(&pipe->lock);

	/* If we're the first user validate and initialize the pipeline. */
	if (pipe->use_count == 0) {
		ret = xvip_pipeline_validate(pipe, dma);
		if (ret < 0) {
			__xvip_pipeline_cleanup(pipe);
			goto done;
		}
	}

	pipe->use_count++;
	ret = 0;

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 */
struct xvip_dma_buffer {
	struct vb2_buffer buf;
	struct list_head queue;
	struct xvip_dma *dma;
};

#define to_xvip_dma_buffer(vb)	container_of(vb, struct xvip_dma_buffer, buf)

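/*
 * DMA transfer completion callback. Remove the buffer from the list of queued
 * buffers, fill in the buffer metadata (field, sequence number, timestamp and
 * payload size) and return the buffer to videobuf2 as done.
 */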
static void xvip_dma_complete(void *param)
{
	struct xvip_dma_buffer *buf = param;
	struct xvip_dma *dma = buf->dma;

	spin_lock(&dma->queued_lock);
	list_del(&buf->queue);
	spin_unlock(&dma->queued_lock);

	buf->buf.v4l2_buf.field = V4L2_FIELD_NONE;
	buf->buf.v4l2_buf.sequence = dma->sequence++;
	v4l2_get_timestamp(&buf->buf.v4l2_buf.timestamp);
	vb2_set_plane_payload(&buf->buf, 0, dma->format.sizeimage);
	vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
}

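/*
 * videobuf2 queue setup: buffers use a single plane whose size is taken from
 * the format passed by VIDIOC_CREATE_BUFS when available, or from the
 * currently configured format otherwise.
 */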
static int
xvip_dma_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
		     unsigned int *nbuffers, unsigned int *nplanes,
		     unsigned int sizes[], void *alloc_ctxs[])
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);

	/* Make sure the image size is large enough. */
	if (fmt && fmt->fmt.pix.sizeimage < dma->format.sizeimage)
		return -EINVAL;

	*nplanes = 1;

	sizes[0] = fmt ? fmt->fmt.pix.sizeimage : dma->format.sizeimage;
	alloc_ctxs[0] = dma->alloc_ctx;

	return 0;
}

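/* Associate the buffer with its DMA engine for the completion callback. */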
static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);

	buf->dma = dma;

	return 0;
}

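/*
 * Queue a buffer for DMA transfer. Fill the interleaved transfer template
 * according to the transfer direction, prepare and submit the descriptor and
 * add the buffer to the list of queued buffers. The transfer is only kicked
 * here when the queue is already streaming, otherwise start_streaming() will
 * issue the pending descriptors.
 */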
static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vb);
	struct dma_async_tx_descriptor *desc;
	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	u32 flags;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_DEV_TO_MEM;
		dma->xt.src_sgl = false;
		dma->xt.dst_sgl = true;
		dma->xt.dst_start = addr;
	} else {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_MEM_TO_DEV;
		dma->xt.src_sgl = true;
		dma->xt.dst_sgl = false;
		dma->xt.src_start = addr;
	}

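	/*
	 * The transfer is described as a single frame of dma->format.height
	 * lines: sgl[0].size is the active line length in bytes and
	 * sgl[0].icg the gap between the end of a line and the start of the
	 * next one.
	 */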
	dma->xt.frame_size = 1;
	dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
	dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
	dma->xt.numf = dma->format.height;

	desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
	if (!desc) {
		dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
		vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
		return;
	}
	desc->callback = xvip_dma_complete;
	desc->callback_param = buf;

	spin_lock_irq(&dma->queued_lock);
	list_add_tail(&buf->queue, &dma->queued_bufs);
	spin_unlock_irq(&dma->queued_lock);

	dmaengine_submit(desc);

	if (vb2_is_streaming(&dma->queue))
		dma_async_issue_pending(dma->dma);
}

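/*
 * Start streaming: mark the pipeline as streaming, verify that the configured
 * format matches the connected subdev, start the DMA engine and enable the
 * pipeline entities. On failure all buffers already queued are returned to
 * videobuf2 in the QUEUED state.
 */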
static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_dma_buffer *buf, *nbuf;
	struct xvip_pipeline *pipe;
	int ret;

	dma->sequence = 0;

	/*
	 * Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 *
	 * Use the pipeline object embedded in the first DMA object that starts
	 * streaming.
	 */
	pipe = dma->video.entity.pipe
	     ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

	ret = media_entity_pipeline_start(&dma->video.entity, &pipe->pipe);
	if (ret < 0)
		goto error;

	/* Verify that the configured format matches the output of the
	 * connected subdev.
	 */
	ret = xvip_dma_verify_format(dma);
	if (ret < 0)
		goto error_stop;

	ret = xvip_pipeline_prepare(pipe, dma);
	if (ret < 0)
		goto error_stop;

	/* Start the DMA engine. This must be done before starting the blocks
	 * in the pipeline to avoid DMA synchronization issues.
	 */
	dma_async_issue_pending(dma->dma);

	/* Start the pipeline. */
	xvip_pipeline_set_stream(pipe, true);

	return 0;

error_stop:
	media_entity_pipeline_stop(&dma->video.entity);

error:
	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf, VB2_BUF_STATE_QUEUED);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);

	return ret;
}

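/*
 * Stop streaming: disable the pipeline entities, terminate the DMA engine,
 * release the pipeline and return all queued buffers to videobuf2 in the
 * ERROR state.
 */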
static void xvip_dma_stop_streaming(struct vb2_queue *vq)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
	struct xvip_dma_buffer *buf, *nbuf;

	/* Stop the pipeline. */
	xvip_pipeline_set_stream(pipe, false);

	/* Stop and reset the DMA engine. */
	dmaengine_terminate_all(dma->dma);

	/* Cleanup the pipeline and mark it as being stopped. */
	xvip_pipeline_cleanup(pipe);
	media_entity_pipeline_stop(&dma->video.entity);

	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);
}

static struct vb2_ops xvip_dma_queue_qops = {
	.queue_setup = xvip_dma_queue_setup,
	.buf_prepare = xvip_dma_buffer_prepare,
	.buf_queue = xvip_dma_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = xvip_dma_start_streaming,
	.stop_streaming = xvip_dma_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
			  | dma->xdev->v4l2_caps;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	else
		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;

	strlcpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
	strlcpy(cap->card, dma->video.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u",
		 dma->xdev->dev->of_node->name, dma->port);

	return 0;
}

/* FIXME: Without this callback some applications are not configured with the
 * correct format and end up receiving frames in the wrong format. Whether this
 * callback really needs to be implemented isn't clearly defined, so it should
 * be clarified through the mailing list.
 */
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	if (f->index > 0)
		return -EINVAL;

	f->pixelformat = dma->format.pixelformat;
	strlcpy(f->description, dma->fmtinfo->description,
		sizeof(f->description));

	return 0;
}

static int
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	format->fmt.pix = dma->format;

	return 0;
}

static void
__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
		      const struct xvip_video_format **fmtinfo)
{
	const struct xvip_video_format *info;
	unsigned int min_width;
	unsigned int max_width;
	unsigned int min_bpl;
	unsigned int max_bpl;
	unsigned int width;
	unsigned int align;
	unsigned int bpl;

	/* Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = xvip_get_format_by_fourcc(pix->pixelformat);
	if (IS_ERR(info))
		info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);

	pix->pixelformat = info->fourcc;
	pix->field = V4L2_FIELD_NONE;

	/* The transfer alignment requirements are expressed in bytes. Compute
	 * the minimum and maximum values, clamp the requested width and convert
	 * it back to pixels.
	 */
	align = lcm(dma->align, info->bpp);
	min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
	max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
	width = rounddown(pix->width * info->bpp, align);

	pix->width = clamp(width, min_width, max_width) / info->bpp;
	pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
			    XVIP_DMA_MAX_HEIGHT);

	/* Clamp the requested bytes per line value. Align it to the transfer
	 * alignment and bound it by the minimum line size for the clamped
	 * width and the maximum transfer width.
	 */
	min_bpl = pix->width * info->bpp;
	max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
	bpl = rounddown(pix->bytesperline, dma->align);

	pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
	pix->sizeimage = pix->bytesperline * pix->height;

	if (fmtinfo)
		*fmtinfo = info;
}

static int
xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	__xvip_dma_try_format(dma, &format->fmt.pix, NULL);
	return 0;
}

static int
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
	const struct xvip_video_format *info;

	__xvip_dma_try_format(dma, &format->fmt.pix, &info);

	if (vb2_is_busy(&dma->queue))
		return -EBUSY;

	dma->format = format->fmt.pix;
	dma->fmtinfo = info;

	return 0;
}

static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
	.vidioc_querycap		= xvip_dma_querycap,
	.vidioc_enum_fmt_vid_cap	= xvip_dma_enum_format,
	.vidioc_g_fmt_vid_cap		= xvip_dma_get_format,
	.vidioc_g_fmt_vid_out		= xvip_dma_get_format,
	.vidioc_s_fmt_vid_cap		= xvip_dma_set_format,
	.vidioc_s_fmt_vid_out		= xvip_dma_set_format,
	.vidioc_try_fmt_vid_cap		= xvip_dma_try_format,
	.vidioc_try_fmt_vid_out		= xvip_dma_try_format,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static const struct v4l2_file_operations xvip_dma_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Xilinx Video DMA Core
 */

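/**
 * xvip_dma_init - Initialize a video DMA engine
 * @xdev: The composite device the DMA engine belongs to
 * @dma: The DMA engine to initialize
 * @type: The buffer type, capture or output
 * @port: The port number, used to request the DMA channel and name the video
 *	  device
 *
 * Initialize the media entity, video device, videobuf2 queue and DMA channel
 * of the DMA engine, and register the video device.
 *
 * Return: 0 on success or a negative error code otherwise. On failure the
 * partially initialized resources are released by xvip_dma_cleanup().
 */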
int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
		  enum v4l2_buf_type type, unsigned int port)
{
	char name[16];
	int ret;

	dma->xdev = xdev;
	dma->port = port;
	mutex_init(&dma->lock);
	mutex_init(&dma->pipe.lock);
	INIT_LIST_HEAD(&dma->queued_bufs);
	spin_lock_init(&dma->queued_lock);

	dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
	dma->format.pixelformat = dma->fmtinfo->fourcc;
	dma->format.colorspace = V4L2_COLORSPACE_SRGB;
	dma->format.field = V4L2_FIELD_NONE;
	dma->format.width = XVIP_DMA_DEF_WIDTH;
	dma->format.height = XVIP_DMA_DEF_HEIGHT;
	dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
	dma->format.sizeimage = dma->format.bytesperline * dma->format.height;

	/* Initialize the media entity... */
	dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
		       ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

	ret = media_entity_init(&dma->video.entity, 1, &dma->pad, 0);
	if (ret < 0)
		goto error;

	/* ... and the video node... */
	dma->video.fops = &xvip_dma_fops;
	dma->video.v4l2_dev = &xdev->v4l2_dev;
	dma->video.queue = &dma->queue;
	snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
		 xdev->dev->of_node->name,
		 type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
		 port);
	dma->video.vfl_type = VFL_TYPE_GRABBER;
	dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			   ? VFL_DIR_RX : VFL_DIR_TX;
	dma->video.release = video_device_release_empty;
	dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
	dma->video.lock = &dma->lock;

	video_set_drvdata(&dma->video, dma);

	/* ... and the buffers queue... */
	dma->alloc_ctx = vb2_dma_contig_init_ctx(dma->xdev->dev);
	if (IS_ERR(dma->alloc_ctx)) {
		ret = PTR_ERR(dma->alloc_ctx);
		goto error;
	}

	/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
	 * V4L2 APIs would be inefficient. Testing on the command line with a
	 * 'cat /dev/video?' thus won't be possible, but given that the driver
	 * requires a test tool anyway to set up the pipeline before any video
	 * stream can be started, requiring a specific V4L2 test tool as well
	 * instead of 'cat' isn't really a drawback.
	 */
	dma->queue.type = type;
	dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dma->queue.lock = &dma->lock;
	dma->queue.drv_priv = dma;
	dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
	dma->queue.ops = &xvip_dma_queue_qops;
	dma->queue.mem_ops = &vb2_dma_contig_memops;
	dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
				   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
	ret = vb2_queue_init(&dma->queue);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
		goto error;
	}

	/* ... and the DMA channel. */
	snprintf(name, sizeof(name), "port%u", port);
	dma->dma = dma_request_slave_channel(dma->xdev->dev, name);
	if (dma->dma == NULL) {
		dev_err(dma->xdev->dev, "no VDMA channel found\n");
		ret = -ENODEV;
		goto error;
	}

	dma->align = 1 << dma->dma->device->copy_align;

	ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to register video device\n");
		goto error;
	}

	return 0;

error:
	xvip_dma_cleanup(dma);
	return ret;
}

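/**
 * xvip_dma_cleanup - Release the resources associated with a video DMA engine
 * @dma: The DMA engine
 *
 * Unregister the video device, release the DMA channel and the videobuf2
 * allocation context, clean up the media entity and destroy the mutexes. The
 * function can be called on a partially initialized DMA engine.
 */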
void xvip_dma_cleanup(struct xvip_dma *dma)
{
	if (video_is_registered(&dma->video))
		video_unregister_device(&dma->video);

	if (dma->dma)
		dma_release_channel(dma->dma);

	if (!IS_ERR_OR_NULL(dma->alloc_ctx))
		vb2_dma_contig_cleanup_ctx(dma->alloc_ctx);

	media_entity_cleanup(&dma->video.entity);

	mutex_destroy(&dma->lock);
	mutex_destroy(&dma->pipe.lock);
}