// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include "mtk-mdp3-cfg.h"
#include "mtk-mdp3-comp.h"
#include "mtk-mdp3-core.h"
#include "mtk-mdp3-regs.h"

#include "mdp_reg_rdma.h"
#include "mdp_reg_ccorr.h"
#include "mdp_reg_rsz.h"
#include "mdp_reg_wrot.h"
#include "mdp_reg_wdma.h"

static u32 mdp_comp_alias_id[MDP_COMP_TYPE_COUNT];
static int p_id;

static inline const struct mdp_platform_config *
__get_plat_cfg(const struct mdp_comp_ctx *ctx)
{
	if (!ctx)
		return NULL;

	return ctx->comp->mdp_dev->mdp_data->mdp_cfg;
}

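/*
 * Return the bitmask of components occupied by this context. On platforms
 * where RDMA0 shares its SRAM with RSZ1 (rdma_rsz1_sram_sharing), claiming
 * RDMA0 implies claiming RSZ1 as well, so both bits are set.
 */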
static s64 get_comp_flag(const struct mdp_comp_ctx *ctx)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 rdma0, rsz1;

	rdma0 = mdp_cfg_get_id_inner(ctx->comp->mdp_dev, MDP_COMP_RDMA0);
	rsz1 = mdp_cfg_get_id_inner(ctx->comp->mdp_dev, MDP_COMP_RSZ1);
	if (!rdma0 || !rsz1)
		return MDP_COMP_NONE;

	if (mdp_cfg && mdp_cfg->rdma_rsz1_sram_sharing)
		if (ctx->comp->inner_id == rdma0)
			return BIT(rdma0) | BIT(rsz1);

	return BIT(ctx->comp->inner_id);
}

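/*
 * Soft-reset the RDMA engine through CMDQ: assert MDP_RDMA_RESET, poll
 * MDP_RDMA_MON_STA_1 until the reset takes effect, then deassert. On
 * platforms with 10-bit RDMA support, RSZ1 is switched off first when
 * this component is RDMA0.
 */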
static int init_rdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	s32 rdma0;

	rdma0 = mdp_cfg_get_id_inner(ctx->comp->mdp_dev, MDP_COMP_RDMA0);
	if (!rdma0)
		return -EINVAL;

	if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
		struct mdp_comp *prz1 = ctx->comp->mdp_dev->comp[MDP_COMP_RSZ1];

		/* Disable RSZ1 */
		if (ctx->comp->inner_id == rdma0 && prz1)
			MM_REG_WRITE(cmd, subsys_id, prz1->reg_base, PRZ_ENABLE,
				     0x0, BIT(0));
	}

	/* Reset RDMA */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, BIT(0), BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, MDP_RDMA_MON_STA_1, BIT(8), BIT(8));
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, 0x0, BIT(0));
	return 0;
}

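/*
 * Per-frame RDMA setup: SMI/GMC burst control, source format, per-plane
 * buffer base and end addresses, pitches and the color-space transform.
 * UFO-compressed sources (MDP_COLOR_IS_UFP) additionally get the Y/C
 * decoder length bases, and packed 10-bit sources a background pitch
 * in pixels.
 */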
static int config_rdma_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 colorformat = ctx->input->buffer.format.colorformat;
	bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
	bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
		if (block10bit)
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_RESV_DUMMY_0, 0x7, 0x7);
		else
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_RESV_DUMMY_0, 0x0, 0x7);
	}

	/* Setup smi control */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_GMCIF_CON,
		     (7 <<  4) + /* burst type to 8 */
		     (1 << 16),  /* enable pre-ultra */
		     0x00030071);

	/* Setup source frame info */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.src_ctrl);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_CON, reg,
		     0x03C8FE0F);

	if (mdp_cfg)
		if (mdp_cfg->rdma_support_10bit && en_ufo) {
			/* Setup source buffer base */
			if (CFG_CHECK(MT8183, p_id))
				reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_y);
			MM_REG_WRITE(cmd, subsys_id,
				     base, MDP_RDMA_UFO_DEC_LENGTH_BASE_Y,
				     reg, 0xFFFFFFFF);
			if (CFG_CHECK(MT8183, p_id))
				reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_c);
			MM_REG_WRITE(cmd, subsys_id,
				     base, MDP_RDMA_UFO_DEC_LENGTH_BASE_C,
				     reg, 0xFFFFFFFF);
			/* Set 10bit source frame pitch */
			if (block10bit) {
				if (CFG_CHECK(MT8183, p_id))
					reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd_in_pxl);
				MM_REG_WRITE(cmd, subsys_id,
					     base, MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
					     reg, 0x001FFFFF);
			}
		}

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.control);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_CON, reg,
		     0x1110);
	/* Setup source buffer base */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova[0]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova[1]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova[2]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, reg,
		     0xFFFFFFFF);
	/* Setup source buffer end */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[0]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0,
		     reg, 0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[1]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1,
		     reg, 0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[2]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2,
		     reg, 0xFFFFFFFF);
	/* Setup source frame pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
		     reg, 0x001FFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.sf_bkgd);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
		     reg, 0x001FFFFF);
	/* Setup color transform */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.transform);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
		     reg, 0x0F110000);

	return 0;
}

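/*
 * Per-subframe (tile) RDMA setup: enable the engine, then program the
 * per-plane pixel offsets, source and clip sizes and the crop offset.
 * On rdma_upsample_repeat_only platforms, tiles wider than 320 pixels
 * also set BIT(2) of MDP_RDMA_RESV_DUMMY_0.
 */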
static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 colorformat = ctx->input->buffer.format.colorformat;
	bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
	bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 csf_l = 0, csf_r = 0;
	u32 reg = 0;

	/* Enable RDMA */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, BIT(0), BIT(0));

	/* Set Y pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[0]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0,
		     reg, 0xFFFFFFFF);

	/* Set 10bit UFO mode */
	if (mdp_cfg) {
		if (mdp_cfg->rdma_support_10bit && block10bit && en_ufo) {
			if (CFG_CHECK(MT8183, p_id))
				reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset_0_p);
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_SRC_OFFSET_0_P,
				     reg, 0xFFFFFFFF);
		}
	}

	/* Set U pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[1]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1,
		     reg, 0xFFFFFFFF);
	/* Set V pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[2]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2,
		     reg, 0xFFFFFFFF);
	/* Set source size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, reg,
		     0x1FFF1FFF);
	/* Set target size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
		     reg, 0x1FFF1FFF);
	/* Set crop offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip_ofst);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
		     reg, 0x003F001F);

	if (CFG_CHECK(MT8183, p_id)) {
		csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
	}
	if (mdp_cfg && mdp_cfg->rdma_upsample_repeat_only)
		if ((csf_r - csf_l + 1) > 320)
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_RESV_DUMMY_0, BIT(2), BIT(2));

	return 0;
}

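/*
 * Wait for the RDMA end-of-frame GCE event, then disable the engine.
 * Only RDMA0 (alias_id 0) provides a done event here.
 */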
static int wait_rdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	if (ctx->comp->alias_id == 0)
		MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
	else
		dev_err(dev, "RDMA1_DONE event is not supported\n");

	/* Disable RDMA */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
	return 0;
}

static const struct mdp_comp_ops rdma_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_rdma,
	.config_frame = config_rdma_frame,
	.config_subfrm = config_rdma_subfrm,
	.wait_comp_event = wait_rdma_event,
};

static int init_rsz(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset RSZ */
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x10000, BIT(16));
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
	/* Enable RSZ */
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));
	return 0;
}

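/*
 * Per-frame RSZ setup. In bypass mode the resizer is simply disabled;
 * otherwise the control registers and the horizontal/vertical coefficient
 * steps (i.e. the scaling ratios) are programmed.
 */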
static int config_rsz_frame(struct mdp_comp_ctx *ctx,
			    struct mdp_cmdq_cmd *cmd,
			    const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	bool bypass = false;
	u32 reg = 0;

	if (CFG_CHECK(MT8183, p_id))
		bypass = CFG_COMP(MT8183, ctx->param, frame.bypass);

	if (bypass) {
		/* Disable RSZ */
		MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(0));
		return 0;
	}

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.control1);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, reg,
		     0x03FFFDF3);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.control2);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, reg,
		     0x0FFFC290);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_x);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP,
		     reg, 0x007FFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_y);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP,
		     reg, 0x007FFFFF);
	return 0;
}

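/*
 * Per-subframe RSZ setup: input/output image sizes plus the luma and
 * chroma accumulation offsets (integer and sub-pixel) for this tile.
 * Tiles no wider than 16 pixels disable DCM via PRZ_CONTROL_1 BIT(27)
 * on platforms that require it (rsz_disable_dcm_small_sample).
 */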
static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 csf_l = 0, csf_r = 0;
	u32 reg = 0;

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].control2);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, reg,
		     0x00003800);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, reg,
		     0xFFFFFFFF);

	if (CFG_CHECK(MT8183, p_id)) {
		csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
	}
	if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample)
		if ((csf_r - csf_l + 1) <= 16)
			MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1,
				     BIT(27), BIT(27));

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
		     reg, 0xFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left_subpix);
	MM_REG_WRITE(cmd, subsys_id,
		     base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
		     reg, 0x1FFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
		     reg, 0xFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top_subpix);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
		     reg, 0x1FFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left);
	MM_REG_WRITE(cmd, subsys_id,
		     base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
		     reg, 0xFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left_subpix);
	MM_REG_WRITE(cmd, subsys_id,
		     base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
		     reg, 0x1FFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, reg,
		     0xFFFFFFFF);

	return 0;
}

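/*
 * Post-subframe step: clear the small-tile DCM-disable bit set in
 * config_rsz_subfrm() once the tile has been processed.
 */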
static int advance_rsz_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);

	if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample) {
		phys_addr_t base = ctx->comp->reg_base;
		u8 subsys_id = ctx->comp->subsys_id;
		u32 csf_l = 0, csf_r = 0;

		if (CFG_CHECK(MT8183, p_id)) {
			csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
			csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
		}

		if ((csf_r - csf_l + 1) <= 16)
			MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, 0x0,
				     BIT(27));
	}

	return 0;
}

static const struct mdp_comp_ops rsz_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_rsz,
	.config_frame = config_rsz_frame,
	.config_subfrm = config_rsz_subfrm,
	.advance_subfrm = advance_rsz_subfrm,
};

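/*
 * Soft-reset the WROT engine with a full handshake: assert VIDO_SOFT_RST,
 * wait for the status bit to rise, deassert, then wait for it to clear.
 */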
static int init_wrot(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset WROT */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
	return 0;
}

static int config_wrot_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	/* Write frame base address */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.iova[0]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.iova[1]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.iova[2]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, reg,
		     0xFFFFFFFF);
	/* Write frame related registers */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.control);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, reg,
		     0xF131510F);
	/* Write frame Y pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.stride[0]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE, reg,
		     0x0000FFFF);
	/* Write frame UV pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.stride[1]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_C, reg,
		     0xFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.stride[2]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_V, reg,
		     0xFFFF);
	/* Write matrix control */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.mat_ctrl);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAT_CTRL, reg, 0xF3);

	/* Set the fixed ALPHA as 0xFF */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
		     0xFF000000);
	/* Set VIDO_EOL_SEL */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));
	/* Set VIDO_FIFO_TEST */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.fifo_test);
	if (reg != 0)
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_FIFO_TEST,
			     reg, 0xFFF);
	/* Filter enable */
	if (mdp_cfg && mdp_cfg->wrot_filter_constraint) {
		if (CFG_CHECK(MT8183, p_id))
			reg = CFG_COMP(MT8183, ctx->param, wrot.filter);
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
			     reg, 0x77);
	}

	return 0;
}

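/*
 * Per-subframe WROT setup: per-plane address offsets, source and target
 * sizes, crop offset and the internal main buffer setting, then enable
 * rotation for this tile.
 */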
static int config_wrot_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	/* Write Y pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[0]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR,
		     reg, 0x0FFFFFFF);
	/* Write U pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[1]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_C,
		     reg, 0x0FFFFFFF);
	/* Write V pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[2]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_V,
		     reg, 0x0FFFFFFF);
	/* Write source size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_IN_SIZE, reg,
		     0x1FFF1FFF);
	/* Write target size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_TAR_SIZE, reg,
		     0x1FFF1FFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip_ofst);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_CROP_OFST, reg,
		     0x1FFF1FFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].main_buf);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
		     reg, 0x1FFF7F00);

	/* Enable WROT */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, BIT(0), BIT(0));

	return 0;
}

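/*
 * Wait for the WROT end-of-frame GCE event, clear the filter-related
 * main buffer bits on constrained platforms, then disable the engine.
 * Only WROT0 (alias_id 0) provides a done event here.
 */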
static int wait_wrot_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	if (ctx->comp->alias_id == 0)
		MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
	else
		dev_err(dev, "WROT1_DONE event is not supported\n");

	if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
			     0x77);

	/* Disable WROT */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, 0x0, BIT(0));

	return 0;
}

static const struct mdp_comp_ops wrot_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_wrot,
	.config_frame = config_wrot_frame,
	.config_subfrm = config_wrot_subfrm,
	.wait_comp_event = wait_wrot_event,
};

static int init_wdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset WDMA */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, BIT(0), BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, WDMA_FLOW_CTRL_DBG, BIT(0), BIT(0));
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, 0x0, BIT(0));
	return 0;
}

static int config_wdma_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	MM_REG_WRITE(cmd, subsys_id, base, WDMA_BUF_CON2, 0x10101050,
		     0xFFFFFFFF);

	/* Setup frame information */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.wdma_cfg);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_CFG, reg,
		     0x0F01B8F0);
	/* Setup frame base address */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.iova[0]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.iova[1]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.iova[2]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR, reg,
		     0xFFFFFFFF);
	/* Setup Y pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.w_in_byte);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_W_IN_BYTE,
		     reg, 0x0000FFFF);
	/* Setup UV pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.uv_stride);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_UV_PITCH,
		     reg, 0x0000FFFF);
	/* Set the fixed ALPHA as 0xFF */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_ALPHA, 0x800000FF,
		     0x800000FF);

	return 0;
}

static int config_wdma_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	/* Write Y pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[0]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR_OFFSET,
		     reg, 0x0FFFFFFF);
	/* Write U pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[1]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR_OFFSET,
		     reg, 0x0FFFFFFF);
	/* Write V pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[2]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR_OFFSET,
		     reg, 0x0FFFFFFF);
	/* Write source size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_SRC_SIZE, reg,
		     0x3FFF3FFF);
	/* Write target size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_SIZE, reg,
		     0x3FFF3FFF);
	/* Write clip offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].clip_ofst);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_COORD, reg,
		     0x3FFF3FFF);

	/* Enable WDMA */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0));

	return 0;
}

static int wait_wdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
	/* Disable WDMA */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, 0x0, BIT(0));
	return 0;
}

static const struct mdp_comp_ops wdma_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_wdma,
	.config_frame = config_wdma_frame,
	.config_subfrm = config_wdma_subfrm,
	.wait_comp_event = wait_wdma_event,
};

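/*
 * Enable the color-correction block and keep it in relay mode, i.e.
 * pixels pass through without an explicit correction matrix.
 */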
static int init_ccorr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* CCORR enable */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_EN, BIT(0), BIT(0));
	/* Relay mode */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_CFG, BIT(0), BIT(0));
	return 0;
}

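/*
 * CCORR only needs the tile dimensions: derive width and height from the
 * inclusive subframe coordinates and program MDP_CCORR_SIZE with the
 * width in the high half-word and the height in the low one.
 */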
static int config_ccorr_subfrm(struct mdp_comp_ctx *ctx,
			       struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 csf_l = 0, csf_r = 0;
	u32 csf_t = 0, csf_b = 0;
	u32 hsize, vsize;

	if (CFG_CHECK(MT8183, p_id)) {
		csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
		csf_t = CFG_COMP(MT8183, ctx->param, subfrms[index].in.top);
		csf_b = CFG_COMP(MT8183, ctx->param, subfrms[index].in.bottom);
	}

	hsize = csf_r - csf_l + 1;
	vsize = csf_b - csf_t + 1;
	MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_SIZE,
		     (hsize << 16) + (vsize <<  0), 0x1FFF1FFF);
	return 0;
}

static const struct mdp_comp_ops ccorr_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_ccorr,
	.config_subfrm = config_ccorr_subfrm,
};

static const struct mdp_comp_ops *mdp_comp_ops[MDP_COMP_TYPE_COUNT] = {
	[MDP_COMP_TYPE_RDMA] =		&rdma_ops,
	[MDP_COMP_TYPE_RSZ] =		&rsz_ops,
	[MDP_COMP_TYPE_WROT] =		&wrot_ops,
	[MDP_COMP_TYPE_WDMA] =		&wdma_ops,
	[MDP_COMP_TYPE_CCORR] =		&ccorr_ops,
};

static const struct of_device_id mdp_comp_dt_ids[] = {
	{
		.compatible = "mediatek,mt8183-mdp3-rdma",
		.data = (void *)MDP_COMP_TYPE_RDMA,
	}, {
		.compatible = "mediatek,mt8183-mdp3-ccorr",
		.data = (void *)MDP_COMP_TYPE_CCORR,
	}, {
		.compatible = "mediatek,mt8183-mdp3-rsz",
		.data = (void *)MDP_COMP_TYPE_RSZ,
	}, {
		.compatible = "mediatek,mt8183-mdp3-wrot",
		.data = (void *)MDP_COMP_TYPE_WROT,
	}, {
		.compatible = "mediatek,mt8183-mdp3-wdma",
		.data = (void *)MDP_COMP_TYPE_WDMA,
	},
	{}
};

static inline bool is_dma_capable(const enum mdp_comp_type type)
{
	return (type == MDP_COMP_TYPE_RDMA ||
		type == MDP_COMP_TYPE_WROT ||
		type == MDP_COMP_TYPE_WDMA);
}

static inline bool is_bypass_gce_event(const enum mdp_comp_type type)
{
	/*
	 * Subcomponent PATH only describes the direction of data flow and
	 * does not need to wait for a GCE event.
	 */
	return (type == MDP_COMP_TYPE_PATH);
}

static int mdp_comp_get_id(struct mdp_dev *mdp, enum mdp_comp_type type, u32 alias_id)
{
	int i;

	for (i = 0; i < mdp->mdp_data->comp_data_len; i++)
		if (mdp->mdp_data->comp_data[i].match.type == type &&
		    mdp->mdp_data->comp_data[i].match.alias_id == alias_id)
			return i;
	return -ENODEV;
}

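/*
 * Power up and clock a component. Only DMA-capable blocks (RDMA, WROT,
 * WDMA) need runtime PM, so power is taken first for them; every valid
 * clock is then enabled. On failure, everything acquired so far is
 * released in reverse order.
 */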
int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp)
{
	int i, ret;

	/* Only DMA capable components need the pm control */
	if (comp->comp_dev && is_dma_capable(comp->type)) {
		ret = pm_runtime_resume_and_get(comp->comp_dev);
		if (ret < 0) {
			dev_err(dev,
				"Failed to get power, err %d. type:%d id:%d\n",
				ret, comp->type, comp->inner_id);
			return ret;
		}
	}

	for (i = 0; i < comp->clk_num; i++) {
		if (IS_ERR_OR_NULL(comp->clks[i]))
			continue;
		ret = clk_prepare_enable(comp->clks[i]);
		if (ret) {
			dev_err(dev,
				"Failed to enable clk %d. type:%d id:%d\n",
				i, comp->type, comp->inner_id);
			goto err_revert;
		}
	}

	return 0;

err_revert:
	while (--i >= 0) {
		if (IS_ERR_OR_NULL(comp->clks[i]))
			continue;
		clk_disable_unprepare(comp->clks[i]);
	}
	if (comp->comp_dev && is_dma_capable(comp->type))
		pm_runtime_put_sync(comp->comp_dev);

	return ret;
}

void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp)
{
	int i;

	for (i = 0; i < comp->clk_num; i++) {
		if (IS_ERR_OR_NULL(comp->clks[i]))
			continue;
		clk_disable_unprepare(comp->clks[i]);
	}

	if (comp->comp_dev && is_dma_capable(comp->type))
		pm_runtime_put(comp->comp_dev);
}

int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		ret = mdp_comp_clock_on(dev, &comps[i]);
		if (ret)
			return ret;
	}

	return 0;
}

void mdp_comp_clocks_off(struct device *dev, struct mdp_comp *comps, int num)
{
	int i;

	for (i = 0; i < num; i++)
		mdp_comp_clock_off(dev, &comps[i]);
}

static int mdp_get_subsys_id(struct mdp_dev *mdp, struct device *dev,
			     struct device_node *node, struct mdp_comp *comp)
{
	struct platform_device *comp_pdev;
	struct cmdq_client_reg cmdq_reg;
	int ret = 0;
	int index = 0;

	if (!dev || !node || !comp)
		return -EINVAL;

	comp_pdev = of_find_device_by_node(node);

	if (!comp_pdev) {
		dev_err(dev, "Failed to get comp_pdev: comp public id=%d, inner id=%d, type=%d\n",
			comp->public_id, comp->inner_id, comp->type);
		return -ENODEV;
	}

	index = mdp->mdp_data->comp_data[comp->public_id].info.dts_reg_ofst;
	ret = cmdq_dev_get_client_reg(&comp_pdev->dev, &cmdq_reg, index);
	if (ret != 0) {
		dev_err(&comp_pdev->dev, "cmdq_dev_get_client_reg fail!\n");
		return -EINVAL;
	}

	comp->subsys_id = cmdq_reg.subsys;
	dev_dbg(&comp_pdev->dev, "subsys id=%d\n", cmdq_reg.subsys);

	return 0;
}

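/*
 * Map the component registers: comp->regs is the CPU mapping used by the
 * driver itself, while comp->reg_base keeps the physical address that
 * CMDQ instructions are encoded against.
 */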
static void __mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
			    struct mdp_comp *comp)
{
	struct resource res;
	phys_addr_t base;
	int index;

	index = mdp->mdp_data->comp_data[comp->public_id].info.dts_reg_ofst;
	if (of_address_to_resource(node, index, &res) < 0)
		base = 0L;
	else
		base = res.start;

	comp->mdp_dev = mdp;
	comp->regs = of_iomap(node, 0);
	comp->reg_base = base;
}

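/*
 * Fill in a component from the per-SoC tables: match data (type, inner
 * and alias ids), register mapping, clocks (clk_num/clk_ofst), the CMDQ
 * subsys id, and the GCE SOF/EOF events from "mediatek,gce-events".
 * DMA-capable components must provide an EOF event; all others run
 * without one.
 */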
static int mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
			 struct mdp_comp *comp, enum mtk_mdp_comp_id id)
{
	struct device *dev = &mdp->pdev->dev;
	struct platform_device *pdev_c;
	int clk_ofst;
	int i;
	s32 event;

	if (id < 0 || id >= MDP_MAX_COMP_COUNT) {
		dev_err(dev, "Invalid component id %d\n", id);
		return -EINVAL;
	}

	pdev_c = of_find_device_by_node(node);
	if (!pdev_c) {
		dev_warn(dev, "can't find platform device of node %pOF\n",
			 node);
		return -ENODEV;
	}

	comp->comp_dev = &pdev_c->dev;
	comp->public_id = id;
	comp->type = mdp->mdp_data->comp_data[id].match.type;
	comp->inner_id = mdp->mdp_data->comp_data[id].match.inner_id;
	comp->alias_id = mdp->mdp_data->comp_data[id].match.alias_id;
	comp->ops = mdp_comp_ops[comp->type];
	__mdp_comp_init(mdp, node, comp);

	comp->clk_num = mdp->mdp_data->comp_data[id].info.clk_num;
	comp->clks = devm_kcalloc(dev, comp->clk_num, sizeof(*comp->clks),
				  GFP_KERNEL);
	if (!comp->clks)
		return -ENOMEM;

	clk_ofst = mdp->mdp_data->comp_data[id].info.clk_ofst;

	for (i = 0; i < comp->clk_num; i++) {
		comp->clks[i] = of_clk_get(node, i + clk_ofst);
		if (IS_ERR(comp->clks[i]))
			break;
	}

	mdp_get_subsys_id(mdp, dev, node, comp);

	/* Set GCE SOF event */
	if (is_bypass_gce_event(comp->type) ||
	    of_property_read_u32_index(node, "mediatek,gce-events",
				       MDP_GCE_EVENT_SOF, &event))
		event = MDP_GCE_NO_EVENT;

	comp->gce_event[MDP_GCE_EVENT_SOF] = event;

	/* Set GCE EOF event */
	if (is_dma_capable(comp->type)) {
		if (of_property_read_u32_index(node, "mediatek,gce-events",
					       MDP_GCE_EVENT_EOF, &event)) {
			dev_err(dev, "Component id %d has no EOF\n", id);
			return -EINVAL;
		}
	} else {
		event = MDP_GCE_NO_EVENT;
	}

	comp->gce_event[MDP_GCE_EVENT_EOF] = event;

	return 0;
}

static void mdp_comp_deinit(struct mdp_comp *comp)
{
	if (!comp)
		return;

	if (comp->comp_dev && comp->clks) {
		devm_kfree(&comp->mdp_dev->pdev->dev, comp->clks);
		comp->clks = NULL;
	}

	if (comp->regs)
		iounmap(comp->regs);
}

static struct mdp_comp *mdp_comp_create(struct mdp_dev *mdp,
					struct device_node *node,
					enum mtk_mdp_comp_id id)
{
	struct device *dev = &mdp->pdev->dev;
	struct mdp_comp *comp;
	int ret;

	if (mdp->comp[id])
		return ERR_PTR(-EEXIST);

	comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	ret = mdp_comp_init(mdp, node, comp, id);
	if (ret) {
		devm_kfree(dev, comp);
		return ERR_PTR(ret);
	}
	mdp->comp[id] = comp;
	mdp->comp[id]->mdp_dev = mdp;

	dev_dbg(dev, "%s type:%d alias:%d public id:%d inner id:%d base:%#x regs:%p\n",
		dev->of_node->name, comp->type, comp->alias_id, id, comp->inner_id,
		(u32)comp->reg_base, comp->regs);
	return comp;
}

static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
	struct device *dev = &mdp->pdev->dev;
	struct device_node *node, *parent;

	parent = dev->of_node->parent;

	for_each_child_of_node(parent, node) {
		const struct of_device_id *of_id;
		enum mdp_comp_type type;
		int id, alias_id;
		struct mdp_comp *comp;

		of_id = of_match_node(mdp->mdp_data->mdp_sub_comp_dt_ids, node);
		if (!of_id)
			continue;
		if (!of_device_is_available(node)) {
			dev_dbg(dev, "Skipping disabled sub comp. %pOF\n",
				node);
			continue;
		}

		type = (enum mdp_comp_type)(uintptr_t)of_id->data;
		alias_id = mdp_comp_alias_id[type];
		id = mdp_comp_get_id(mdp, type, alias_id);
		if (id < 0) {
			dev_err(dev,
				"Failed to get sub comp id: type %d alias %d\n",
				type, alias_id);
			return -EINVAL;
		}
		mdp_comp_alias_id[type]++;

		comp = mdp_comp_create(mdp, node, id);
		if (IS_ERR(comp))
			return PTR_ERR(comp);
	}

	return 0;
}

void mdp_comp_destroy(struct mdp_dev *mdp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mdp->comp); i++) {
		if (mdp->comp[i]) {
			if (is_dma_capable(mdp->comp[i]->type))
				pm_runtime_disable(mdp->comp[i]->comp_dev);
			mdp_comp_deinit(mdp->comp[i]);
			/* comps are devm-allocated against the mdp device */
			devm_kfree(&mdp->pdev->dev, mdp->comp[i]);
			mdp->comp[i] = NULL;
		}
	}
}

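/*
 * Discover and create all MDP components. The function blocks are DT
 * siblings of the MDP3 node, so the parent's children are matched
 * against mdp_comp_dt_ids here and against the per-SoC sub-component
 * table in mdp_comp_sub_create(). Nodes of the same type are told apart
 * by an increasing alias id.
 */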
int mdp_comp_config(struct mdp_dev *mdp)
{
	struct device *dev = &mdp->pdev->dev;
	struct device_node *node, *parent;
	int ret;

	memset(mdp_comp_alias_id, 0, sizeof(mdp_comp_alias_id));
	p_id = mdp->mdp_data->mdp_plat_id;

	parent = dev->of_node->parent;
	/* Iterate over sibling MDP function blocks */
	for_each_child_of_node(parent, node) {
		const struct of_device_id *of_id;
		enum mdp_comp_type type;
		int id, alias_id;
		struct mdp_comp *comp;

		of_id = of_match_node(mdp_comp_dt_ids, node);
		if (!of_id)
			continue;

		if (!of_device_is_available(node)) {
			dev_dbg(dev, "Skipping disabled component %pOF\n",
				node);
			continue;
		}

		type = (enum mdp_comp_type)(uintptr_t)of_id->data;
		alias_id = mdp_comp_alias_id[type];
		id = mdp_comp_get_id(mdp, type, alias_id);
		if (id < 0) {
			dev_err(dev,
				"Failed to get component id: type %d alias %d\n",
				type, alias_id);
			continue;
		}
		mdp_comp_alias_id[type]++;

		comp = mdp_comp_create(mdp, node, id);
		if (IS_ERR(comp)) {
			ret = PTR_ERR(comp);
			goto err_init_comps;
		}

		/* Only DMA capable components need the pm control */
		if (!is_dma_capable(comp->type))
			continue;
		pm_runtime_enable(comp->comp_dev);
	}

	ret = mdp_comp_sub_create(mdp);
	if (ret)
		goto err_init_comps;

	return 0;

err_init_comps:
	mdp_comp_destroy(mdp);
	return ret;
}

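/*
 * Bind a command context to a component using the firmware-provided
 * img_compparam: resolve the public component id from the platform
 * config, then hook up the input and output buffer descriptors from
 * the frame parameter.
 */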
int mdp_comp_ctx_config(struct mdp_dev *mdp, struct mdp_comp_ctx *ctx,
			const struct img_compparam *param,
			const struct img_ipi_frameparam *frame)
{
	struct device *dev = &mdp->pdev->dev;
	enum mtk_mdp_comp_id public_id = MDP_COMP_NONE;
	u32 arg;
	int i, idx;

	if (!param) {
		dev_err(dev, "Invalid component param\n");
		return -EINVAL;
	}

	if (CFG_CHECK(MT8183, p_id))
		arg = CFG_COMP(MT8183, param, type);
	else
		return -EINVAL;
	public_id = mdp_cfg_get_id_public(mdp, arg);
	if (public_id < 0) {
		dev_err(dev, "Invalid component id %d\n", public_id);
		return -EINVAL;
	}

	ctx->comp = mdp->comp[public_id];
	if (!ctx->comp) {
		dev_err(dev, "Uninitialized component inner id %d\n", arg);
		return -EINVAL;
	}

	ctx->param = param;
	if (CFG_CHECK(MT8183, p_id))
		arg = CFG_COMP(MT8183, param, input);
	else
		return -EINVAL;
	ctx->input = &frame->inputs[arg];
	if (CFG_CHECK(MT8183, p_id))
		idx = CFG_COMP(MT8183, param, num_outputs);
	else
		return -EINVAL;
	for (i = 0; i < idx; i++) {
		if (CFG_CHECK(MT8183, p_id))
			arg = CFG_COMP(MT8183, param, outputs[i]);
		else
			return -EINVAL;
		ctx->outputs[i] = &frame->outputs[arg];
	}
	return 0;
}