// SPDX-License-Identifier: GPL-2.0-only
/*
 * VPDMA helper library
 *
 * Copyright (c) 2013 Texas Instruments Inc.
 *
 * David Griego, <dagriego@biglakesoftware.com>
 * Dale Farnsworth, <dale@farnsworth.org>
 * Archit Taneja, <archit@ti.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>

#include "vpdma.h"
#include "vpdma_priv.h"

#define VPDMA_FIRMWARE	"vpdma-1b8.bin"

const struct vpdma_data_format vpdma_yuv_fmts[] = {
	[VPDMA_DATA_FMT_Y444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y444,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_Y422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_Y420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y420,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C444,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C420,
		.depth		= 4,
	},
	[VPDMA_DATA_FMT_CB420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CB420,
		.depth		= 4,
	},
	[VPDMA_DATA_FMT_YCR422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YCR422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_YC444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YC444,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_CRY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CRY422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_CBY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_YCB422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YCB422,
		.depth		= 16,
	},
};
EXPORT_SYMBOL(vpdma_yuv_fmts);

const struct vpdma_data_format vpdma_rgb_fmts[] = {
	[VPDMA_DATA_FMT_RGB565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB16_565,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_1555,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_RGBA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_5551,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_RGBA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_RGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB24_888,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_ARGB32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_RGBA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_RGBA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_BGR565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR16_565,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_1555,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_BGRA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_5551,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_BGRA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_BGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR24_888,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_ABGR32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_BGRA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_BGRA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA32_8888,
		.depth		= 32,
	},
};
EXPORT_SYMBOL(vpdma_rgb_fmts);

/*
 * To handle RAW formats we reuse the CBY422 vpdma data type, so that the
 * VPDMA is used to reorder the incoming bytes, as the parser assumes that
 * the first byte presented on the bus is the MSB of a 2-byte value.
 * RAW8 handles from 1 to 8 bits
 * RAW16 handles from 9 to 16 bits
 */
const struct vpdma_data_format vpdma_raw_fmts[] = {
	[VPDMA_DATA_FMT_RAW8] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_RAW16] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
		.depth		= 16,
	},
};
EXPORT_SYMBOL(vpdma_raw_fmts);

const struct vpdma_data_format vpdma_misc_fmts[] = {
	[VPDMA_DATA_FMT_MV] = {
		.type		= VPDMA_DATA_FMT_TYPE_MISC,
		.data_type	= DATA_TYPE_MV,
		.depth		= 4,
	},
};
EXPORT_SYMBOL(vpdma_misc_fmts);
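
/*
 * Usage note (illustrative): drivers pick an entry from the tables above by
 * format index, e.g. &vpdma_yuv_fmts[VPDMA_DATA_FMT_CBY422]. The 'depth'
 * field gives the bits per pixel of the (sub)plane; the DTD helpers below
 * use it to turn a crop/compose rectangle's left edge into a byte offset
 * (rect.left * depth >> 3).
 */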

struct vpdma_channel_info {
	int num;		/* VPDMA channel number */
	int cstat_offset;	/* client CSTAT register offset */
};

static const struct vpdma_channel_info chan_info[] = {
	[VPE_CHAN_LUMA1_IN] = {
		.num		= VPE_CHAN_NUM_LUMA1_IN,
		.cstat_offset	= VPDMA_DEI_LUMA1_CSTAT,
	},
	[VPE_CHAN_CHROMA1_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA1_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA1_CSTAT,
	},
	[VPE_CHAN_LUMA2_IN] = {
		.num		= VPE_CHAN_NUM_LUMA2_IN,
		.cstat_offset	= VPDMA_DEI_LUMA2_CSTAT,
	},
	[VPE_CHAN_CHROMA2_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA2_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA2_CSTAT,
	},
	[VPE_CHAN_LUMA3_IN] = {
		.num		= VPE_CHAN_NUM_LUMA3_IN,
		.cstat_offset	= VPDMA_DEI_LUMA3_CSTAT,
	},
	[VPE_CHAN_CHROMA3_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA3_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA3_CSTAT,
	},
	[VPE_CHAN_MV_IN] = {
		.num		= VPE_CHAN_NUM_MV_IN,
		.cstat_offset	= VPDMA_DEI_MV_IN_CSTAT,
	},
	[VPE_CHAN_MV_OUT] = {
		.num		= VPE_CHAN_NUM_MV_OUT,
		.cstat_offset	= VPDMA_DEI_MV_OUT_CSTAT,
	},
	[VPE_CHAN_LUMA_OUT] = {
		.num		= VPE_CHAN_NUM_LUMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
	[VPE_CHAN_CHROMA_OUT] = {
		.num		= VPE_CHAN_NUM_CHROMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_UV_CSTAT,
	},
	[VPE_CHAN_RGB_OUT] = {
		.num		= VPE_CHAN_NUM_RGB_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
};

static u32 read_reg(struct vpdma_data *vpdma, int offset)
{
	return ioread32(vpdma->base + offset);
}

static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
{
	iowrite32(value, vpdma->base + offset);
}

static int read_field_reg(struct vpdma_data *vpdma, int offset,
		u32 mask, int shift)
{
	return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
}

static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
		u32 mask, int shift)
{
	u32 val = read_reg(vpdma, offset);

	val &= ~(mask << shift);
	val |= (field & mask) << shift;

	write_reg(vpdma, offset, val);
}

void vpdma_dump_regs(struct vpdma_data *vpdma)
{
	struct device *dev = &vpdma->pdev->dev;

#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))

	dev_dbg(dev, "VPDMA Registers:\n");

	DUMPREG(PID);
	DUMPREG(LIST_ADDR);
	DUMPREG(LIST_ATTR);
	DUMPREG(LIST_STAT_SYNC);
	DUMPREG(BG_RGB);
	DUMPREG(BG_YUV);
	DUMPREG(SETUP);
	DUMPREG(MAX_SIZE1);
	DUMPREG(MAX_SIZE2);
	DUMPREG(MAX_SIZE3);

	/*
	 * dumping registers of only group0 and group3, because VPE channels
	 * lie within group0 and group3 registers
	 */
	DUMPREG(INT_CHAN_STAT(0));
	DUMPREG(INT_CHAN_MASK(0));
	DUMPREG(INT_CHAN_STAT(3));
	DUMPREG(INT_CHAN_MASK(3));
	DUMPREG(INT_CLIENT0_STAT);
	DUMPREG(INT_CLIENT0_MASK);
	DUMPREG(INT_CLIENT1_STAT);
	DUMPREG(INT_CLIENT1_MASK);
	DUMPREG(INT_LIST0_STAT);
	DUMPREG(INT_LIST0_MASK);

	/*
	 * these are registers specific to VPE clients, we can make this
	 * function dump client registers specific to VPE or VIP based on
	 * who is using it
	 */
	DUMPREG(DEI_CHROMA1_CSTAT);
	DUMPREG(DEI_LUMA1_CSTAT);
	DUMPREG(DEI_CHROMA2_CSTAT);
	DUMPREG(DEI_LUMA2_CSTAT);
	DUMPREG(DEI_CHROMA3_CSTAT);
	DUMPREG(DEI_LUMA3_CSTAT);
	DUMPREG(DEI_MV_IN_CSTAT);
	DUMPREG(DEI_MV_OUT_CSTAT);
	DUMPREG(VIP_UP_Y_CSTAT);
	DUMPREG(VIP_UP_UV_CSTAT);
	DUMPREG(VPI_CTL_CSTAT);
}
EXPORT_SYMBOL(vpdma_dump_regs);

/*
 * Allocate a DMA buffer
 */
int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
{
	buf->size = size;
	buf->mapped = false;
	buf->addr = kzalloc(size, GFP_KERNEL);
	if (!buf->addr)
		return -ENOMEM;

	WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0);

	return 0;
}
EXPORT_SYMBOL(vpdma_alloc_desc_buf);

void vpdma_free_desc_buf(struct vpdma_buf *buf)
{
	WARN_ON(buf->mapped);
	kfree(buf->addr);
	buf->addr = NULL;
	buf->size = 0;
}
EXPORT_SYMBOL(vpdma_free_desc_buf);

/*
 * map descriptor/payload DMA buffer, enabling DMA access
 */
int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	WARN_ON(buf->mapped);
	buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, buf->dma_addr)) {
		dev_err(dev, "failed to map buffer\n");
		return -EINVAL;
	}

	buf->mapped = true;

	return 0;
}
EXPORT_SYMBOL(vpdma_map_desc_buf);

/*
 * unmap descriptor/payload DMA buffer, disabling DMA access and
 * allowing the main processor to access the data
 */
void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	if (buf->mapped)
		dma_unmap_single(dev, buf->dma_addr, buf->size,
				DMA_BIDIRECTIONAL);

	buf->mapped = false;
}
EXPORT_SYMBOL(vpdma_unmap_desc_buf);
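
/*
 * Note (descriptive): descriptor buffers are allocated with kzalloc() and
 * mapped with dma_map_single(), so a buffer must be mapped while VPDMA
 * parses it and unmapped again before the CPU modifies its contents; see
 * vpdma_update_dma_addr() below, which unmaps, patches a descriptor and
 * then remaps the buffer.
 */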

/*
 * Cleanup all pending descriptors of a list.
 * First, stop the current list being processed.
 * If the VPDMA was busy, this step makes the VPDMA accept list posts again.
 * To clean up the internal FSM, post an abort-channel control descriptor for
 * each of the channels in the @channels array of size @size.
 */
int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
		int *channels, int size)
{
	struct vpdma_desc_list abort_list;
	int i, ret, timeout = 500;

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(1 << VPDMA_LIST_STOP_SHFT));

	if (size <= 0 || !channels)
		return 0;

	ret = vpdma_create_desc_list(&abort_list,
		size * sizeof(struct vpdma_dtd), VPDMA_LIST_TYPE_NORMAL);
	if (ret)
		return ret;

	for (i = 0; i < size; i++)
		vpdma_add_abort_channel_ctd(&abort_list, channels[i]);

	ret = vpdma_map_desc_buf(vpdma, &abort_list.buf);
	if (ret)
		goto free_desc;
	ret = vpdma_submit_descs(vpdma, &abort_list, list_num);
	if (ret)
		goto unmap_desc;

	while (vpdma_list_busy(vpdma, list_num) && --timeout)
		;

	if (timeout == 0) {
		dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
		ret = -EBUSY;
	}

unmap_desc:
	vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
free_desc:
	vpdma_free_desc_buf(&abort_list.buf);

	return ret;
}
EXPORT_SYMBOL(vpdma_list_cleanup);
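
/*
 * Illustrative call (sketch only, channel choice is the caller's): a driver
 * tearing down a context could abort the channels it used like this:
 *
 *	int chans[] = { VPE_CHAN_NUM_LUMA1_IN, VPE_CHAN_NUM_CHROMA1_IN };
 *
 *	ret = vpdma_list_cleanup(vpdma, list_num, chans, ARRAY_SIZE(chans));
 *
 * Passing a NULL @channels (or @size <= 0) only stops the list without
 * posting any abort descriptors.
 */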

/*
 * Create a descriptor list; the user of this list will append configuration,
 * control and data descriptors to it, and the list will then be submitted to
 * VPDMA. VPDMA's list parser will go through each descriptor and perform the
 * required DMA operations.
 */
int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
{
	int r;

	r = vpdma_alloc_desc_buf(&list->buf, size);
	if (r)
		return r;

	list->next = list->buf.addr;

	list->type = type;

	return 0;
}
EXPORT_SYMBOL(vpdma_create_desc_list);

/*
 * once a descriptor list is parsed by VPDMA, we reset the list by emptying it,
 * to allow new descriptors to be added to the list.
 */
void vpdma_reset_desc_list(struct vpdma_desc_list *list)
{
	list->next = list->buf.addr;
}
EXPORT_SYMBOL(vpdma_reset_desc_list);

/*
 * free the buffer allocated for the VPDMA descriptor list, this should be
 * called when the user doesn't want to use VPDMA any more.
 */
void vpdma_free_desc_list(struct vpdma_desc_list *list)
{
	vpdma_free_desc_buf(&list->buf);

	list->next = NULL;
}
EXPORT_SYMBOL(vpdma_free_desc_list);

bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
{
	return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
}
EXPORT_SYMBOL(vpdma_list_busy);

/*
 * submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion
 */
int vpdma_submit_descs(struct vpdma_data *vpdma,
			struct vpdma_desc_list *list, int list_num)
{
	int list_size;
	unsigned long flags;

	if (vpdma_list_busy(vpdma, list_num))
		return -EBUSY;

	/* 16-byte granularity */
	list_size = (list->next - list->buf.addr) >> 4;

	spin_lock_irqsave(&vpdma->lock, flags);
	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(list->type << VPDMA_LIST_TYPE_SHFT) |
			list_size);
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return 0;
}
EXPORT_SYMBOL(vpdma_submit_descs);
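
/*
 * Typical flow (sketch, mirroring what vpdma_list_cleanup() above does;
 * list_num, size and chan are caller-provided): create a list, append
 * descriptors, map the backing buffer, submit, wait until the list is no
 * longer busy, then unmap and reset or free the list:
 *
 *	ret = vpdma_create_desc_list(&list, size, VPDMA_LIST_TYPE_NORMAL);
 *	vpdma_add_sync_on_channel_ctd(&list, chan);
 *	ret = vpdma_map_desc_buf(vpdma, &list.buf);
 *	ret = vpdma_submit_descs(vpdma, &list, list_num);
 *	...wait for list completion...
 *	vpdma_unmap_desc_buf(vpdma, &list.buf);
 *	vpdma_reset_desc_list(&list);
 */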

static void dump_dtd(struct vpdma_dtd *dtd);

void vpdma_update_dma_addr(struct vpdma_data *vpdma,
	struct vpdma_desc_list *list, dma_addr_t dma_addr,
	void *write_dtd, int drop, int idx)
{
	struct vpdma_dtd *dtd = list->buf.addr;
	dma_addr_t write_desc_addr;
	int offset;

	dtd += idx;
	vpdma_unmap_desc_buf(vpdma, &list->buf);

	dtd->start_addr = dma_addr;

	/* Calculate write address from the offset of write_dtd from start
	 * of the list->buf
	 */
	offset = (void *)write_dtd - list->buf.addr;
	write_desc_addr = list->buf.dma_addr + offset;

	if (drop)
		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
							   1, 1, 0);
	else
		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
							   1, 0, 0);

	vpdma_map_desc_buf(vpdma, &list->buf);

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_update_dma_addr);

void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
			u32 width, u32 height)
{
	if (reg_addr != VPDMA_MAX_SIZE1 && reg_addr != VPDMA_MAX_SIZE2 &&
	    reg_addr != VPDMA_MAX_SIZE3)
		reg_addr = VPDMA_MAX_SIZE1;

	write_field_reg(vpdma, reg_addr, width - 1,
			VPDMA_MAX_SIZE_WIDTH_MASK, VPDMA_MAX_SIZE_WIDTH_SHFT);

	write_field_reg(vpdma, reg_addr, height - 1,
			VPDMA_MAX_SIZE_HEIGHT_MASK, VPDMA_MAX_SIZE_HEIGHT_SHFT);
}
EXPORT_SYMBOL(vpdma_set_max_size);
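
/*
 * Example (illustrative values only): program the MAX_SIZE1 width/height
 * limit registers for a 1920x1080 frame:
 *
 *	vpdma_set_max_size(vpdma, VPDMA_MAX_SIZE1, 1920, 1080);
 */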

static void dump_cfd(struct vpdma_cfd *cfd)
{
	int class;

	class = cfd_get_class(cfd);

	pr_debug("config descriptor of payload class: %s\n",
		class == CFD_CLS_BLOCK ? "simple block" :
		"address data block");

	if (class == CFD_CLS_BLOCK)
		pr_debug("word0: dst_addr_offset = 0x%08x\n",
			cfd->dest_addr_offset);

	if (class == CFD_CLS_BLOCK)
		pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);

	pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);

	pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n",
		 cfd_get_pkt_type(cfd),
		 cfd_get_direct(cfd), class, cfd_get_dest(cfd),
		 cfd_get_payload_len(cfd));
}

/*
 * append a configuration descriptor to the given descriptor list, where the
 * payload is in the form of a simple data block specified in the descriptor
 * header, this is used to upload scaler coefficients to the scaler module
 */
void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *blk, u32 dest_offset)
{
	struct vpdma_cfd *cfd;
	int len = blk->size;

	WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->dest_addr_offset = dest_offset;
	cfd->block_len = len;
	cfd->payload_addr = (u32) blk->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
				client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
EXPORT_SYMBOL(vpdma_add_cfd_block);

/*
 * append a configuration descriptor to the given descriptor list, where the
 * payload is in the address data block format; this is used to configure a
 * discontiguous set of MMRs
 */
void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *adb)
{
	struct vpdma_cfd *cfd;
	unsigned int len = adb->size;

	WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
	WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->w0 = 0;
	cfd->w1 = 0;
	cfd->payload_addr = (u32) adb->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
				client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
EXPORT_SYMBOL(vpdma_add_cfd_adb);
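
/*
 * Usage note (sketch): for both configuration descriptor flavours the
 * payload lives in a separate vpdma_buf that must be allocated, filled and
 * mapped before the list is submitted, e.g.:
 *
 *	ret = vpdma_alloc_desc_buf(&mmr_adb, sizeof(struct my_mmr_adb));
 *	...fill in the ADB payload...
 *	ret = vpdma_map_desc_buf(vpdma, &mmr_adb);
 *	vpdma_add_cfd_adb(&list, client, &mmr_adb);
 *
 * Here 'struct my_mmr_adb' and 'client' are placeholders: the payload layout
 * (whose size must respect VPDMA_ADB_SIZE_ALIGN, see the WARN_ON above) and
 * the destination client id come from the IP (e.g. VPE) using this library.
 */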

/*
 * the control descriptor format changes based on the type of control
 * descriptor; we only use 'sync on channel' control descriptors for now, so
 * assume that type
 */
static void dump_ctd(struct vpdma_ctd *ctd)
{
	pr_debug("control descriptor\n");

	pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
		ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
}

/*
 * append a 'sync on channel' type control descriptor to the given descriptor
 * list; this descriptor stalls the VPDMA list until DMA is completed on the
 * specified channel
 */
void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
		enum vpdma_channel chan)
{
	struct vpdma_ctd *ctd;

	ctd = list->next;
	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));

	ctd->w0 = 0;
	ctd->w1 = 0;
	ctd->w2 = 0;
	ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
				CTD_TYPE_SYNC_ON_CHANNEL);

	list->next = ctd + 1;

	dump_ctd(ctd);
}
EXPORT_SYMBOL(vpdma_add_sync_on_channel_ctd);

/*
 * append an 'abort_channel' type control descriptor to the given descriptor
 * list; this descriptor aborts any DMA transaction currently using the
 * specified channel
 */
void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
		int chan_num)
{
	struct vpdma_ctd *ctd;

	ctd = list->next;
	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));

	ctd->w0 = 0;
	ctd->w1 = 0;
	ctd->w2 = 0;
	ctd->type_source_ctl = ctd_type_source_ctl(chan_num,
				CTD_TYPE_ABORT_CHANNEL);

	list->next = ctd + 1;

	dump_ctd(ctd);
}
EXPORT_SYMBOL(vpdma_add_abort_channel_ctd);
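
/*
 * Note (descriptive): 'sync on channel' descriptors are appended to normal
 * work lists to serialize against a channel's DMA completion, while 'abort
 * channel' descriptors are what vpdma_list_cleanup() above posts to flush
 * the channel state machines during teardown.
 */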

static void dump_dtd(struct vpdma_dtd *dtd)
{
	int dir, chan;

	dir = dtd_get_dir(dtd);
	chan = dtd_get_chan(dtd);

	pr_debug("%s data transfer descriptor for channel %d\n",
		dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);

	pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
		dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
		dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
		dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word1: line_length = %d, xfer_height = %d\n",
			dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));

	pr_debug("word2: start_addr = %x\n", dtd->start_addr);

	pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
		 dtd_get_pkt_type(dtd),
		 dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
		 dtd_get_next_chan(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word4: frame_width = %d, frame_height = %d\n",
			dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
	else
		pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n",
			dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
			dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word5: hor_start = %d, ver_start = %d\n",
			dtd_get_h_start(dtd), dtd_get_v_start(dtd));
	else
		pr_debug("word5: max_width %d, max_height %d\n",
			dtd_get_max_width(dtd), dtd_get_max_height(dtd));

	pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0);
	pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1);
}


/*
 * append an outbound data transfer descriptor to the given descriptor list;
 * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory
 * @c_rect: compose params of output image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @max_w: enum for maximum width of data transfer
 * @max_h: enum for maximum height of data transfer
 * @chan: VPDMA channel
 * @flags: VPDMA flags to configure some descriptor fields
 */
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, enum vpdma_channel chan, u32 flags)
{
	vpdma_rawchan_add_out_dtd(list, width, stride, c_rect, fmt, dma_addr,
				  max_w, max_h, chan_info[chan].num, flags);
}
EXPORT_SYMBOL(vpdma_add_out_dtd);

void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, int raw_vpdma_chan, u32 flags)
{
	int priority = 0;
	int field = 0;
	int notify = 1;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	int depth = fmt->depth;
	struct vpdma_dtd *dtd;

	channel = next_chan = raw_vpdma_chan;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
	    (fmt->data_type == DATA_TYPE_C420 ||
	     fmt->data_type == DATA_TYPE_CB420)) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);
	dtd->w1 = 0;
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_OUT, channel, priority, next_chan);
	dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
	dtd->max_width_height = dtd_max_width_height(max_w, max_h);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);


/*
 * append an inbound data transfer descriptor to the given descriptor list;
 * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory (not the cropped width)
 * @c_rect: crop params of input image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @chan: VPDMA channel
 * @field: top or bottom field info of the input image
 * @flags: VPDMA flags to configure some descriptor fields
 * @frame_width/height: the complete width/height of the image presented to the
 *			client (this makes sense when multiple channels are
 *			connected to the same client, forming a larger frame)
 * @start_h, @start_v: position where the given channel starts providing pixel
 *			data to the client (makes sense when multiple channels
 *			contribute to the client)
 */
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		enum vpdma_channel chan, int field, u32 flags, int frame_width,
		int frame_height, int start_h, int start_v)
{
	int priority = 0;
	int notify = 1;
	int depth = fmt->depth;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	struct vpdma_dtd *dtd;

	channel = next_chan = chan_info[chan].num;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
	    (fmt->data_type == DATA_TYPE_C420 ||
	     fmt->data_type == DATA_TYPE_CB420)) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);

	dtd->xfer_length_height = dtd_xfer_length_height(rect.width,
					rect.height);
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_IN, channel, priority, next_chan);
	dtd->frame_width_height = dtd_frame_width_height(frame_width,
					frame_height);
	dtd->start_h_v = dtd_start_h_v(start_h, start_v);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_add_in_dtd);
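
/*
 * Note (descriptive): for the C420/CB420 chroma data types, both DTD helpers
 * above halve rect.top and rect.height and use a depth of 8 bits, since the
 * chroma plane of a 4:2:0 buffer is vertically subsampled; the DMA start
 * address is then advanced by rect.top * stride + (rect.left * depth >> 3).
 */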

int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv)
{
	int i, list_num = -1;
	unsigned long flags;

	spin_lock_irqsave(&vpdma->lock, flags);
	for (i = 0; i < VPDMA_MAX_NUM_LIST && vpdma->hwlist_used[i]; i++)
		;

	if (i < VPDMA_MAX_NUM_LIST) {
		list_num = i;
		vpdma->hwlist_used[i] = true;
		vpdma->hwlist_priv[i] = priv;
	}
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return list_num;
}
EXPORT_SYMBOL(vpdma_hwlist_alloc);

void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num)
{
	if (!vpdma || list_num >= VPDMA_MAX_NUM_LIST)
		return NULL;

	return vpdma->hwlist_priv[list_num];
}
EXPORT_SYMBOL(vpdma_hwlist_get_priv);

void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num)
{
	void *priv;
	unsigned long flags;

	spin_lock_irqsave(&vpdma->lock, flags);
	vpdma->hwlist_used[list_num] = false;
	priv = vpdma->hwlist_priv[list_num];
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return priv;
}
EXPORT_SYMBOL(vpdma_hwlist_release);
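
/*
 * Illustrative usage (sketch; 'ctx' stands for whatever per-context pointer
 * the caller wants to stash): grab a free hardware list at stream-on and
 * give it back at stream-off:
 *
 *	list_num = vpdma_hwlist_alloc(vpdma, ctx);
 *	if (list_num < 0)
 *		return -EBUSY;
 *	...
 *	ctx = vpdma_hwlist_get_priv(vpdma, list_num);
 *	...
 *	vpdma_hwlist_release(vpdma, list_num);
 *
 * vpdma_hwlist_alloc() returns -1 when all VPDMA_MAX_NUM_LIST lists are in
 * use; mapping that to -EBUSY is the caller's choice.
 */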

/* set or clear the mask for list complete interrupt */
void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
		int list_num, bool enable)
{
	u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
	u32 val;

	val = read_reg(vpdma, reg_addr);
	if (enable)
		val |= (1 << (list_num * 2));
	else
		val &= ~(1 << (list_num * 2));
	write_reg(vpdma, reg_addr, val);
}
EXPORT_SYMBOL(vpdma_enable_list_complete_irq);
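
/*
 * Note (descriptive): each list owns two consecutive bits in the LIST0
 * STAT/MASK registers, hence the 'list_num * 2' shifts here and the
 * '3 << (list_num * 2)' write in vpdma_clear_list_stat() below.
 */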
997*8148baabSPratyush Yadav 
998*8148baabSPratyush Yadav /* get the LIST_STAT register */
vpdma_get_list_stat(struct vpdma_data * vpdma,int irq_num)999*8148baabSPratyush Yadav unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num)
1000*8148baabSPratyush Yadav {
1001*8148baabSPratyush Yadav 	u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
1002*8148baabSPratyush Yadav 
1003*8148baabSPratyush Yadav 	return read_reg(vpdma, reg_addr);
1004*8148baabSPratyush Yadav }
1005*8148baabSPratyush Yadav EXPORT_SYMBOL(vpdma_get_list_stat);
1006*8148baabSPratyush Yadav 
1007*8148baabSPratyush Yadav /* get the LIST_MASK register */
vpdma_get_list_mask(struct vpdma_data * vpdma,int irq_num)1008*8148baabSPratyush Yadav unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
1009*8148baabSPratyush Yadav {
1010*8148baabSPratyush Yadav 	u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
1011*8148baabSPratyush Yadav 
1012*8148baabSPratyush Yadav 	return read_reg(vpdma, reg_addr);
1013*8148baabSPratyush Yadav }
1014*8148baabSPratyush Yadav EXPORT_SYMBOL(vpdma_get_list_mask);
1015*8148baabSPratyush Yadav 
1016*8148baabSPratyush Yadav /* clear previously occurred list interrupts in the LIST_STAT register */
vpdma_clear_list_stat(struct vpdma_data * vpdma,int irq_num,int list_num)1017*8148baabSPratyush Yadav void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
1018*8148baabSPratyush Yadav 			   int list_num)
1019*8148baabSPratyush Yadav {
1020*8148baabSPratyush Yadav 	u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
1021*8148baabSPratyush Yadav 
1022*8148baabSPratyush Yadav 	write_reg(vpdma, reg_addr, 3 << (list_num * 2));
1023*8148baabSPratyush Yadav }
1024*8148baabSPratyush Yadav EXPORT_SYMBOL(vpdma_clear_list_stat);
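
/*
 * The four interrupt helpers above are normally used together; a minimal
 * sketch, assuming a driver-private "dev" and irq line 0 (both hypothetical):
 *
 *	// at stream on: unmask the list-complete interrupt for this list
 *	vpdma_enable_list_complete_irq(dev->vpdma, 0, list_num, true);
 *
 *	// in the irq handler: see which lists completed and ack them
 *	u32 irqst = vpdma_get_list_stat(dev->vpdma, 0);
 *
 *	for (i = 0; i < VPDMA_MAX_NUM_LIST; i++) {
 *		if (irqst & (1 << (i * 2))) {
 *			vpdma_clear_list_stat(dev->vpdma, 0, i);
 *			// ... complete the job queued on list i ...
 *		}
 *	}
 */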

void vpdma_set_bg_color(struct vpdma_data *vpdma,
		struct vpdma_data_format *fmt, u32 color)
{
	if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB)
		write_reg(vpdma, VPDMA_BG_RGB, color);
	else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV)
		write_reg(vpdma, VPDMA_BG_YUV, color);
}
EXPORT_SYMBOL(vpdma_set_bg_color);
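
/*
 * For example, a scaler that letterboxes its output can fill the uncovered
 * area before queueing descriptors (the "dev" pointer and the packed
 * "yuv_black" value are illustrative; the exact VPDMA_BG_YUV bit layout is
 * device-specific):
 *
 *	vpdma_set_bg_color(dev->vpdma, (struct vpdma_data_format *)
 *			   &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444], yuv_black);
 */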

/*
 * configures the output mode of the line buffer for the given client: the
 * line buffer content can either be mirrored (each line repeated twice) or
 * passed to the client as is
 */
void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, line_mode,
		VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
}
EXPORT_SYMBOL(vpdma_set_line_mode);

/*
 * configures the event which should trigger a VPDMA transfer for the given
 * client
 */
void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
		enum vpdma_frame_start_event fs_event,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, fs_event,
		VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
}
EXPORT_SYMBOL(vpdma_set_frame_start_event);
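
/*
 * These two per-client controls are usually programmed back to back when a
 * channel is set up, e.g. (a sketch: the channel choices and the line_mode
 * value below are placeholders, not requirements of this library):
 *
 *	// whether the line buffer mirrors chroma lines depends on the
 *	// chroma subsampling of the format fed to this client
 *	vpdma_set_line_mode(dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
 *
 *	// start the transfer as soon as the channel becomes active
 *	vpdma_set_frame_start_event(dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
 *				    VPE_CHAN_LUMA1_IN);
 */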
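
/*
 * Completion callback for the asynchronous firmware request issued from
 * vpdma_load_firmware(): copy the microcode into a DMA-able descriptor
 * buffer, point VPDMA_LIST_ADDR at it, and poll the list manager ready bit
 * for up to a second before notifying the client driver via vpdma->cb().
 */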
static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
	struct vpdma_data *vpdma = context;
	struct vpdma_buf fw_dma_buf;
	int i, r;

	dev_dbg(&vpdma->pdev->dev, "firmware callback\n");

	if (!f || !f->data) {
		dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
		return;
	}

	/* already initialized */
	if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
			VPDMA_LIST_RDY_SHFT)) {
		vpdma->cb(vpdma->pdev);
		return;
	}

	r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
	if (r) {
		dev_err(&vpdma->pdev->dev,
			"failed to allocate dma buffer for firmware\n");
		goto rel_fw;
	}

	memcpy(fw_dma_buf.addr, f->data, f->size);

	vpdma_map_desc_buf(vpdma, &fw_dma_buf);

	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);

	for (i = 0; i < 100; i++) {		/* max 1 second */
		msleep_interruptible(10);

		if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
				VPDMA_LIST_RDY_SHFT))
			break;
	}

	if (i == 100) {
		dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
		goto free_buf;
	}

	vpdma->cb(vpdma->pdev);

free_buf:
	vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);

	vpdma_free_desc_buf(&fw_dma_buf);
rel_fw:
	release_firmware(f);
}

static int vpdma_load_firmware(struct vpdma_data *vpdma)
{
	int r;
	struct device *dev = &vpdma->pdev->dev;

	r = request_firmware_nowait(THIS_MODULE, 1,
		(const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
		vpdma_firmware_cb);
	if (r) {
		dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
		return r;
	}

	dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);

	return 0;
}

int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
		void (*cb)(struct platform_device *pdev))
{
	struct resource *res;
	int r;

	dev_dbg(&pdev->dev, "vpdma_create\n");

	vpdma->pdev = pdev;
	vpdma->cb = cb;
	spin_lock_init(&vpdma->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
	if (res == NULL) {
		dev_err(&pdev->dev, "missing platform resources data\n");
		return -ENODEV;
	}

	vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!vpdma->base) {
		dev_err(&pdev->dev, "failed to ioremap\n");
		return -ENOMEM;
	}

	r = vpdma_load_firmware(vpdma);
	if (r) {
		dev_err(&pdev->dev, "failed to load firmware %s\n",
			VPDMA_FIRMWARE);
		return r;
	}

	return 0;
}
EXPORT_SYMBOL(vpdma_create);
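
/*
 * A client driver typically calls vpdma_create() from its probe routine and
 * defers the rest of its initialization to the callback, since the firmware
 * load completes asynchronously. A minimal sketch (the "vpe_fw_cb" and
 * "vpe_probe" names and the surrounding driver structure are hypothetical):
 *
 *	static void vpe_fw_cb(struct platform_device *pdev)
 *	{
 *		// firmware is loaded and the list manager is ready;
 *		// safe to register video devices, request irqs, etc.
 *	}
 *
 *	static int vpe_probe(struct platform_device *pdev)
 *	{
 *		...
 *		ret = vpdma_create(pdev, &dev->vpdma_data, vpe_fw_cb);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */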

MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_FIRMWARE(VPDMA_FIRMWARE);
MODULE_LICENSE("GPL v2");