19c92ab61SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2f7e7b48eSJacob Chen /*
3f7e7b48eSJacob Chen  * Copyright (C) 2017 Fuzhou Rockchip Electronics Co.Ltd
4f7e7b48eSJacob Chen  * Author: Jacob Chen <jacob-chen@iotwrt.com>
5f7e7b48eSJacob Chen  */
6f7e7b48eSJacob Chen 
7f7e7b48eSJacob Chen #include <linux/pm_runtime.h>
8f7e7b48eSJacob Chen 
9f7e7b48eSJacob Chen #include <media/v4l2-device.h>
10f7e7b48eSJacob Chen #include <media/v4l2-ioctl.h>
11f7e7b48eSJacob Chen #include <media/v4l2-mem2mem.h>
12f7e7b48eSJacob Chen #include <media/videobuf2-dma-sg.h>
13f7e7b48eSJacob Chen #include <media/videobuf2-v4l2.h>
14f7e7b48eSJacob Chen 
15f7e7b48eSJacob Chen #include "rga-hw.h"
16f7e7b48eSJacob Chen #include "rga.h"
17f7e7b48eSJacob Chen 
18f7e7b48eSJacob Chen static int
rga_queue_setup(struct vb2_queue * vq,unsigned int * nbuffers,unsigned int * nplanes,unsigned int sizes[],struct device * alloc_devs[])19f7e7b48eSJacob Chen rga_queue_setup(struct vb2_queue *vq,
20f7e7b48eSJacob Chen 		unsigned int *nbuffers, unsigned int *nplanes,
21f7e7b48eSJacob Chen 		unsigned int sizes[], struct device *alloc_devs[])
22f7e7b48eSJacob Chen {
23f7e7b48eSJacob Chen 	struct rga_ctx *ctx = vb2_get_drv_priv(vq);
24f7e7b48eSJacob Chen 	struct rga_frame *f = rga_get_frame(ctx, vq->type);
25f7e7b48eSJacob Chen 
26f7e7b48eSJacob Chen 	if (IS_ERR(f))
27f7e7b48eSJacob Chen 		return PTR_ERR(f);
28f7e7b48eSJacob Chen 
29f7e7b48eSJacob Chen 	if (*nplanes)
30f7e7b48eSJacob Chen 		return sizes[0] < f->size ? -EINVAL : 0;
31f7e7b48eSJacob Chen 
32f7e7b48eSJacob Chen 	sizes[0] = f->size;
33f7e7b48eSJacob Chen 	*nplanes = 1;
34f7e7b48eSJacob Chen 
35f7e7b48eSJacob Chen 	return 0;
36f7e7b48eSJacob Chen }
37f7e7b48eSJacob Chen 
rga_buf_prepare(struct vb2_buffer * vb)38f7e7b48eSJacob Chen static int rga_buf_prepare(struct vb2_buffer *vb)
39f7e7b48eSJacob Chen {
40f7e7b48eSJacob Chen 	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
41f7e7b48eSJacob Chen 	struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);
42f7e7b48eSJacob Chen 
43f7e7b48eSJacob Chen 	if (IS_ERR(f))
44f7e7b48eSJacob Chen 		return PTR_ERR(f);
45f7e7b48eSJacob Chen 
46f7e7b48eSJacob Chen 	vb2_set_plane_payload(vb, 0, f->size);
47f7e7b48eSJacob Chen 
48f7e7b48eSJacob Chen 	return 0;
49f7e7b48eSJacob Chen }
50f7e7b48eSJacob Chen 
rga_buf_queue(struct vb2_buffer * vb)51f7e7b48eSJacob Chen static void rga_buf_queue(struct vb2_buffer *vb)
52f7e7b48eSJacob Chen {
53f7e7b48eSJacob Chen 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
54f7e7b48eSJacob Chen 	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
55f7e7b48eSJacob Chen 
56f7e7b48eSJacob Chen 	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
57f7e7b48eSJacob Chen }
58f7e7b48eSJacob Chen 
rga_buf_return_buffers(struct vb2_queue * q,enum vb2_buffer_state state)599aecc035SEzequiel Garcia static void rga_buf_return_buffers(struct vb2_queue *q,
609aecc035SEzequiel Garcia 				   enum vb2_buffer_state state)
61f7e7b48eSJacob Chen {
62f7e7b48eSJacob Chen 	struct rga_ctx *ctx = vb2_get_drv_priv(q);
63f7e7b48eSJacob Chen 	struct vb2_v4l2_buffer *vbuf;
64f7e7b48eSJacob Chen 
65f7e7b48eSJacob Chen 	for (;;) {
66f7e7b48eSJacob Chen 		if (V4L2_TYPE_IS_OUTPUT(q->type))
67f7e7b48eSJacob Chen 			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
68f7e7b48eSJacob Chen 		else
69f7e7b48eSJacob Chen 			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
70f7e7b48eSJacob Chen 		if (!vbuf)
71f7e7b48eSJacob Chen 			break;
729aecc035SEzequiel Garcia 		v4l2_m2m_buf_done(vbuf, state);
739aecc035SEzequiel Garcia 	}
74f7e7b48eSJacob Chen }
75f7e7b48eSJacob Chen 
rga_buf_start_streaming(struct vb2_queue * q,unsigned int count)769aecc035SEzequiel Garcia static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
779aecc035SEzequiel Garcia {
789aecc035SEzequiel Garcia 	struct rga_ctx *ctx = vb2_get_drv_priv(q);
799aecc035SEzequiel Garcia 	struct rockchip_rga *rga = ctx->rga;
809aecc035SEzequiel Garcia 	int ret;
819aecc035SEzequiel Garcia 
82*0314339aSMauro Carvalho Chehab 	ret = pm_runtime_resume_and_get(rga->dev);
839aecc035SEzequiel Garcia 	if (ret < 0) {
849aecc035SEzequiel Garcia 		rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
859aecc035SEzequiel Garcia 		return ret;
869aecc035SEzequiel Garcia 	}
879aecc035SEzequiel Garcia 
889aecc035SEzequiel Garcia 	return 0;
899aecc035SEzequiel Garcia }
909aecc035SEzequiel Garcia 
rga_buf_stop_streaming(struct vb2_queue * q)919aecc035SEzequiel Garcia static void rga_buf_stop_streaming(struct vb2_queue *q)
929aecc035SEzequiel Garcia {
939aecc035SEzequiel Garcia 	struct rga_ctx *ctx = vb2_get_drv_priv(q);
949aecc035SEzequiel Garcia 	struct rockchip_rga *rga = ctx->rga;
959aecc035SEzequiel Garcia 
969aecc035SEzequiel Garcia 	rga_buf_return_buffers(q, VB2_BUF_STATE_ERROR);
97f7e7b48eSJacob Chen 	pm_runtime_put(rga->dev);
98f7e7b48eSJacob Chen }
99f7e7b48eSJacob Chen 
/* vb2 queue operations shared by the RGA OUTPUT and CAPTURE queues. */
const struct vb2_ops rga_qops = {
	.queue_setup = rga_queue_setup,
	.buf_prepare = rga_buf_prepare,
	.buf_queue = rga_buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = rga_buf_start_streaming,
	.stop_streaming = rga_buf_stop_streaming,
};
109f7e7b48eSJacob Chen 
/* RGA MMU is a 1-Level MMU, so it can't be used through the IOMMU API.
 * We use it more like a scatter-gather list.
 */
void rga_buf_map(struct vb2_buffer *vb)
{
	struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct rockchip_rga *rga = ctx->rga;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	/* 32-bit per-page entries consumed directly by the RGA's own MMU. */
	unsigned int *pages;
	/*
	 * NOTE(review): 'address' is unsigned int while sg_phys() returns
	 * phys_addr_t — truncates above 4 GiB. The table entries are
	 * 32-bit anyway, so presumably the hw can only address 32 bits;
	 * confirm before reusing this on LPAE/64-bit systems.
	 */
	unsigned int address, len, i, p;
	unsigned int mapped_size = 0;

	/* OUTPUT buffers fill the source table, CAPTURE the destination. */
	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pages = rga->src_mmu_pages;
	else
		pages = rga->dst_mmu_pages;

	/* Create local MMU table for RGA */
	sgt = vb2_plane_cookie(vb, 0);

	/*
	 * NOTE(review): this mixes sg_phys() (CPU physical address) with
	 * sg_dma_len() (post-mapping DMA length) — assumes a 1:1
	 * phys/DMA mapping for this device; verify. The >> PAGE_SHIFT
	 * also assumes each segment length is a whole number of pages.
	 */
	for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
		len = sg_dma_len(sgl) >> PAGE_SHIFT;
		address = sg_phys(sgl);

		/* One table entry per page of this segment. */
		for (p = 0; p < len; p++) {
			dma_addr_t phys = address +
					  ((dma_addr_t)p << PAGE_SHIFT);

			pages[mapped_size + p] = phys;
		}

		mapped_size += len;
	}

	/* sync local MMU table for RGA */
	/*
	 * NOTE(review): 8 * PAGE_SIZE presumably matches the allocation
	 * size of src/dst_mmu_pages elsewhere in the driver — confirm
	 * against rga.c; syncing virt_to_phys() as a dma handle assumes
	 * a direct-mapped (non-IOMMU) DMA path for rga->dev.
	 */
	dma_sync_single_for_device(rga->dev, virt_to_phys(pages),
				   8 * PAGE_SIZE, DMA_BIDIRECTIONAL);
}
149