// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include "mtk-mdp3-cfg.h"
#include "mtk-mdp3-comp.h"
#include "mtk-mdp3-core.h"
#include "mtk-mdp3-regs.h"

#include "mdp_reg_rdma.h"
#include "mdp_reg_ccorr.h"
#include "mdp_reg_rsz.h"
#include "mdp_reg_wrot.h"
#include "mdp_reg_wdma.h"

static u32 mdp_comp_alias_id[MDP_COMP_TYPE_COUNT];
static int p_id;

static inline const struct mdp_platform_config *
__get_plat_cfg(const struct mdp_comp_ctx *ctx)
{
	if (!ctx)
		return NULL;

	return ctx->comp->mdp_dev->mdp_data->mdp_cfg;
}

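/*
 * Report which inner component IDs this context occupies, as a
 * bitmask. On platforms where RDMA0 shares its SRAM with RSZ1
 * (mdp_cfg->rdma_rsz1_sram_sharing), claiming RDMA0 claims RSZ1 as
 * well, so the two blocks are never scheduled concurrently.
 */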
static s64 get_comp_flag(const struct mdp_comp_ctx *ctx)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 rdma0, rsz1;

	rdma0 = mdp_cfg_get_id_inner(ctx->comp->mdp_dev, MDP_COMP_RDMA0);
	rsz1 = mdp_cfg_get_id_inner(ctx->comp->mdp_dev, MDP_COMP_RSZ1);
	if (!rdma0 || !rsz1)
		return MDP_COMP_NONE;

	if (mdp_cfg && mdp_cfg->rdma_rsz1_sram_sharing)
		if (ctx->comp->inner_id == rdma0)
			return BIT(rdma0) | BIT(rsz1);

	return BIT(ctx->comp->inner_id);
}

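/*
 * Bring RDMA to a known state before a frame: when RDMA0 shares SRAM
 * with RSZ1, RSZ1 is disabled first so the 10-bit path can use the
 * shared buffer; the engine is then soft-reset by asserting the reset
 * bit, polling BIT(8) of MDP_RDMA_MON_STA_1 (presumably the
 * reset-done flag), and releasing the reset bit.
 */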
static int init_rdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	s32 rdma0;

	rdma0 = mdp_cfg_get_id_inner(ctx->comp->mdp_dev, MDP_COMP_RDMA0);
	if (!rdma0)
		return -EINVAL;

	if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
		struct mdp_comp *prz1 = ctx->comp->mdp_dev->comp[MDP_COMP_RSZ1];

		/* Disable RSZ1 */
		if (ctx->comp->inner_id == rdma0 && prz1)
			MM_REG_WRITE(cmd, subsys_id, prz1->reg_base, PRZ_ENABLE,
				     0x0, BIT(0));
	}

	/* Reset RDMA */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, BIT(0), BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, MDP_RDMA_MON_STA_1, BIT(8), BIT(8));
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, 0x0, BIT(0));
	return 0;
}

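/*
 * Per-frame RDMA programming from the IPI parameter block: SMI/GMC
 * bus options, source frame info, per-plane base/end addresses,
 * pitches and the color-transform setting. Each CFG_CHECK()/
 * CFG_COMP() pair reads the MT8183 parameter layout; on any other
 * platform ID the register keeps the previous value of "reg".
 */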
static int config_rdma_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 colorformat = ctx->input->buffer.format.colorformat;
	bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
	bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
		if (block10bit)
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_RESV_DUMMY_0, 0x7, 0x7);
		else
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_RESV_DUMMY_0, 0x0, 0x7);
	}

	/* Setup smi control */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_GMCIF_CON,
		     (7 << 4) +	// burst type to 8
		     (1 << 16),	// enable pre-ultra
		     0x00030071);

	/* Setup source frame info */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.src_ctrl);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_CON, reg,
		     0x03C8FE0F);

	if (mdp_cfg)
		if (mdp_cfg->rdma_support_10bit && en_ufo) {
			/* Setup source buffer base */
			if (CFG_CHECK(MT8183, p_id))
				reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_y);
			MM_REG_WRITE(cmd, subsys_id,
				     base, MDP_RDMA_UFO_DEC_LENGTH_BASE_Y,
				     reg, 0xFFFFFFFF);
			if (CFG_CHECK(MT8183, p_id))
				reg = CFG_COMP(MT8183, ctx->param, rdma.ufo_dec_c);
			MM_REG_WRITE(cmd, subsys_id,
				     base, MDP_RDMA_UFO_DEC_LENGTH_BASE_C,
				     reg, 0xFFFFFFFF);
			/* Set 10bit source frame pitch */
			if (block10bit) {
				if (CFG_CHECK(MT8183, p_id))
					reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd_in_pxl);
				MM_REG_WRITE(cmd, subsys_id,
					     base, MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
					     reg, 0x001FFFFF);
			}
		}

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.control);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_CON, reg,
		     0x1110);
	/* Setup source buffer base */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova[0]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova[1]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova[2]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, reg,
		     0xFFFFFFFF);
	/* Setup source buffer end */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[0]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0,
		     reg, 0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[1]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1,
		     reg, 0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.iova_end[2]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2,
		     reg, 0xFFFFFFFF);
	/* Setup source frame pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.mf_bkgd);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
		     reg, 0x001FFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.sf_bkgd);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
		     reg, 0x001FFFFF);
	/* Setup color transform */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.transform);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
		     reg, 0x0F110000);

	return 0;
}

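/*
 * Per-tile (sub-frame) RDMA programming: per-plane pixel offsets,
 * source and clip sizes, and the crop offset. On platforms that only
 * support repeat-mode upsampling, BIT(2) of MDP_RDMA_RESV_DUMMY_0 is
 * additionally set for tiles wider than 320 pixels.
 */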
static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 colorformat = ctx->input->buffer.format.colorformat;
	bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
	bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 csf_l = 0, csf_r = 0;
	u32 reg = 0;

	/* Enable RDMA */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, BIT(0), BIT(0));

	/* Set Y pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[0]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0,
		     reg, 0xFFFFFFFF);

	/* Set 10bit UFO mode */
	if (mdp_cfg) {
		if (mdp_cfg->rdma_support_10bit && block10bit && en_ufo) {
			if (CFG_CHECK(MT8183, p_id))
				reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset_0_p);
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_SRC_OFFSET_0_P,
				     reg, 0xFFFFFFFF);
		}
	}

	/* Set U pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[1]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1,
		     reg, 0xFFFFFFFF);
	/* Set V pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].offset[2]);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2,
		     reg, 0xFFFFFFFF);
	/* Set source size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, reg,
		     0x1FFF1FFF);
	/* Set target size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
		     reg, 0x1FFF1FFF);
	/* Set crop offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rdma.subfrms[index].clip_ofst);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
		     reg, 0x003F001F);

	if (CFG_CHECK(MT8183, p_id)) {
		csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
	}
	if (mdp_cfg && mdp_cfg->rdma_upsample_repeat_only)
		if ((csf_r - csf_l + 1) > 320)
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_RESV_DUMMY_0, BIT(2), BIT(2));

	return 0;
}

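/*
 * Make the GCE thread wait for the RDMA frame-done event before
 * disabling the engine. Only instance 0 (RDMA0) has a usable done
 * event here; for other instances an error is logged instead.
 */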
static int wait_rdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	if (ctx->comp->alias_id == 0)
		MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
	else
		dev_err(dev, "RDMA1_DONE event is not supported\n");

	/* Disable RDMA */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
	return 0;
}

static const struct mdp_comp_ops rdma_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_rdma,
	.config_frame = config_rdma_frame,
	.config_subfrm = config_rdma_subfrm,
	.wait_comp_event = wait_rdma_event,
};

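/*
 * Pulse the RSZ reset bit (BIT(16) of PRZ_ENABLE), then enable the
 * resizer core (BIT(0)).
 */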
static int init_rsz(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset RSZ */
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x10000, BIT(16));
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
	/* Enable RSZ */
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));
	return 0;
}

static int config_rsz_frame(struct mdp_comp_ctx *ctx,
			    struct mdp_cmdq_cmd *cmd,
			    const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	bool bypass = false;
	u32 reg = 0;

	if (CFG_CHECK(MT8183, p_id))
		bypass = CFG_COMP(MT8183, ctx->param, frame.bypass);

	if (bypass) {
		/* Disable RSZ */
		MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(0));
		return 0;
	}

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.control1);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, reg,
		     0x03FFFDF3);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.control2);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, reg,
		     0x0FFFC290);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_x);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP,
		     reg, 0x007FFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.coeff_step_y);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP,
		     reg, 0x007FFFFF);
	return 0;
}

static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 csf_l = 0, csf_r = 0;
	u32 reg = 0;

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].control2);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, reg,
		     0x00003800);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, reg,
		     0xFFFFFFFF);

	if (CFG_CHECK(MT8183, p_id)) {
		csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
	}
	if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample)
		if ((csf_r - csf_l + 1) <= 16)
			MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1,
				     BIT(27), BIT(27));

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
		     reg, 0xFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.left_subpix);
	MM_REG_WRITE(cmd, subsys_id,
		     base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
		     reg, 0x1FFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
		     reg, 0xFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].luma.top_subpix);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
		     reg, 0x1FFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left);
	MM_REG_WRITE(cmd, subsys_id,
		     base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
		     reg, 0xFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, subfrms[index].chroma.left_subpix);
	MM_REG_WRITE(cmd, subsys_id,
		     base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
		     reg, 0x1FFFFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, rsz.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, reg,
		     0xFFFFFFFF);

	return 0;
}

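/*
 * Post-tile counterpart of config_rsz_subfrm(): that path sets
 * BIT(27) of PRZ_CONTROL_1 (disabling DCM, per the
 * rsz_disable_dcm_small_sample quirk) for tiles of 16 input pixels
 * or fewer, and this one clears it again once the tile is processed.
 */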
static int advance_rsz_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);

	if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample) {
		phys_addr_t base = ctx->comp->reg_base;
		u8 subsys_id = ctx->comp->subsys_id;
		u32 csf_l = 0, csf_r = 0;

		if (CFG_CHECK(MT8183, p_id)) {
			csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
			csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
		}

		if ((csf_r - csf_l + 1) <= 16)
			MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, 0x0,
				     BIT(27));
	}

	return 0;
}

static const struct mdp_comp_ops rsz_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_rsz,
	.config_frame = config_rsz_frame,
	.config_subfrm = config_rsz_subfrm,
	.advance_subfrm = advance_rsz_subfrm,
};

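/*
 * Soft-reset the WROT (rotating write-out DMA) engine: assert
 * VIDO_SOFT_RST, poll until the status flag latches, release the
 * reset, then poll until the flag clears again.
 */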
static int init_wrot(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset WROT */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
	return 0;
}

static int config_wrot_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	/* Write frame base address */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.iova[0]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.iova[1]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.iova[2]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, reg,
		     0xFFFFFFFF);
	/* Write frame related registers */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.control);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, reg,
		     0xF131510F);
	/* Write frame Y pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.stride[0]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE, reg,
		     0x0000FFFF);
	/* Write frame UV pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.stride[1]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_C, reg,
		     0xFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.stride[2]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_V, reg,
		     0xFFFF);
	/* Write matrix control */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.mat_ctrl);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAT_CTRL, reg, 0xF3);

	/* Set the fixed ALPHA as 0xFF */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
		     0xFF000000);
	/* Set VIDO_EOL_SEL */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));
	/* Set VIDO_FIFO_TEST */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.fifo_test);
	if (reg != 0)
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_FIFO_TEST,
			     reg, 0xFFF);
	/* Filter enable */
	if (mdp_cfg && mdp_cfg->wrot_filter_constraint) {
		if (CFG_CHECK(MT8183, p_id))
			reg = CFG_COMP(MT8183, ctx->param, wrot.filter);
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
			     reg, 0x77);
	}

	return 0;
}

static int config_wrot_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	/* Write Y pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[0]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR,
		     reg, 0x0FFFFFFF);
	/* Write U pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[1]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_C,
		     reg, 0x0FFFFFFF);
	/* Write V pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].offset[2]);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_V,
		     reg, 0x0FFFFFFF);
	/* Write source size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_IN_SIZE, reg,
		     0x1FFF1FFF);
	/* Write target size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_TAR_SIZE, reg,
		     0x1FFF1FFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].clip_ofst);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_CROP_OFST, reg,
		     0x1FFF1FFF);

	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wrot.subfrms[index].main_buf);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
		     reg, 0x1FFF7F00);

	/* Enable WROT */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, BIT(0), BIT(0));

	return 0;
}

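/*
 * Make the GCE thread wait for the WROT frame-done event (only wired
 * for instance 0), undo the main-buffer filter setting on platforms
 * with the wrot_filter_constraint quirk, then disable the engine.
 */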
static int wait_wrot_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	if (ctx->comp->alias_id == 0)
		MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
	else
		dev_err(dev, "WROT1_DONE event is not supported\n");

	if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
			     0x77);

	/* Disable WROT */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, 0x0, BIT(0));

	return 0;
}

static const struct mdp_comp_ops wrot_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_wrot,
	.config_frame = config_wrot_frame,
	.config_subfrm = config_wrot_subfrm,
	.wait_comp_event = wait_wrot_event,
};

static int init_wdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset WDMA */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, BIT(0), BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, WDMA_FLOW_CTRL_DBG, BIT(0), BIT(0));
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, 0x0, BIT(0));
	return 0;
}

static int config_wdma_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	MM_REG_WRITE(cmd, subsys_id, base, WDMA_BUF_CON2, 0x10101050,
		     0xFFFFFFFF);

	/* Setup frame information */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.wdma_cfg);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_CFG, reg,
		     0x0F01B8F0);
	/* Setup frame base address */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.iova[0]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.iova[1]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR, reg,
		     0xFFFFFFFF);
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.iova[2]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR, reg,
		     0xFFFFFFFF);
	/* Setup Y pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.w_in_byte);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_W_IN_BYTE,
		     reg, 0x0000FFFF);
	/* Setup UV pitch */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.uv_stride);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_UV_PITCH,
		     reg, 0x0000FFFF);
	/* Set the fixed ALPHA as 0xFF */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_ALPHA, 0x800000FF,
		     0x800000FF);

	return 0;
}

static int config_wdma_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 reg = 0;

	/* Write Y pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[0]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR_OFFSET,
		     reg, 0x0FFFFFFF);
	/* Write U pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[1]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR_OFFSET,
		     reg, 0x0FFFFFFF);
	/* Write V pixel offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].offset[2]);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR_OFFSET,
		     reg, 0x0FFFFFFF);
	/* Write source size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].src);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_SRC_SIZE, reg,
		     0x3FFF3FFF);
	/* Write target size */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].clip);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_SIZE, reg,
		     0x3FFF3FFF);
	/* Write clip offset */
	if (CFG_CHECK(MT8183, p_id))
		reg = CFG_COMP(MT8183, ctx->param, wdma.subfrms[index].clip_ofst);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_COORD, reg,
		     0x3FFF3FFF);

	/* Enable WDMA */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0));

	return 0;
}

static int wait_wdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
	/* Disable WDMA */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, 0x0, BIT(0));
	return 0;
}

static const struct mdp_comp_ops wdma_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_wdma,
	.config_frame = config_wdma_frame,
	.config_subfrm = config_wdma_subfrm,
	.wait_comp_event = wait_wdma_event,
};

static int init_ccorr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* CCORR enable */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_EN, BIT(0), BIT(0));
	/* Relay mode */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_CFG, BIT(0), BIT(0));
	return 0;
}

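/*
 * Program the CCORR processing window. The size register takes the
 * tile dimensions packed as (width << 16) | height, derived from the
 * inclusive in.left/right/top/bottom tile coordinates.
 */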
static int config_ccorr_subfrm(struct mdp_comp_ctx *ctx,
			       struct mdp_cmdq_cmd *cmd, u32 index)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 csf_l = 0, csf_r = 0;
	u32 csf_t = 0, csf_b = 0;
	u32 hsize, vsize;

	if (CFG_CHECK(MT8183, p_id)) {
		csf_l = CFG_COMP(MT8183, ctx->param, subfrms[index].in.left);
		csf_r = CFG_COMP(MT8183, ctx->param, subfrms[index].in.right);
		csf_t = CFG_COMP(MT8183, ctx->param, subfrms[index].in.top);
		csf_b = CFG_COMP(MT8183, ctx->param, subfrms[index].in.bottom);
	}

	hsize = csf_r - csf_l + 1;
	vsize = csf_b - csf_t + 1;
	MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_SIZE,
		     (hsize << 16) + (vsize << 0), 0x1FFF1FFF);
	return 0;
}

static const struct mdp_comp_ops ccorr_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_ccorr,
	.config_subfrm = config_ccorr_subfrm,
};

static const struct mdp_comp_ops *mdp_comp_ops[MDP_COMP_TYPE_COUNT] = {
	[MDP_COMP_TYPE_RDMA] = &rdma_ops,
	[MDP_COMP_TYPE_RSZ] = &rsz_ops,
	[MDP_COMP_TYPE_WROT] = &wrot_ops,
	[MDP_COMP_TYPE_WDMA] = &wdma_ops,
	[MDP_COMP_TYPE_CCORR] = &ccorr_ops,
};

static const struct of_device_id mdp_comp_dt_ids[] __maybe_unused = {
	{
		.compatible = "mediatek,mt8183-mdp3-rdma",
		.data = (void *)MDP_COMP_TYPE_RDMA,
	}, {
		.compatible = "mediatek,mt8183-mdp3-ccorr",
		.data = (void *)MDP_COMP_TYPE_CCORR,
	}, {
		.compatible = "mediatek,mt8183-mdp3-rsz",
		.data = (void *)MDP_COMP_TYPE_RSZ,
	}, {
		.compatible = "mediatek,mt8183-mdp3-wrot",
		.data = (void *)MDP_COMP_TYPE_WROT,
	}, {
		.compatible = "mediatek,mt8183-mdp3-wdma",
		.data = (void *)MDP_COMP_TYPE_WDMA,
	},
	{}
};

static inline bool is_dma_capable(const enum mdp_comp_type type)
{
	return (type == MDP_COMP_TYPE_RDMA ||
		type == MDP_COMP_TYPE_WROT ||
		type == MDP_COMP_TYPE_WDMA);
}

static inline bool is_bypass_gce_event(const enum mdp_comp_type type)
{
	/*
	 * Subcomponent PATH is only used for the direction of data flow and
	 * does not need to wait for a GCE event.
	 */
	return (type == MDP_COMP_TYPE_PATH);
}

static int mdp_comp_get_id(struct mdp_dev *mdp, enum mdp_comp_type type, u32 alias_id)
{
	int i;

	for (i = 0; i < mdp->mdp_data->comp_data_len; i++)
		if (mdp->mdp_data->comp_data[i].match.type == type &&
		    mdp->mdp_data->comp_data[i].match.alias_id == alias_id)
			return i;
	return -ENODEV;
}

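/*
 * Power up one component: DMA-capable components own a platform
 * device with runtime PM, so a PM reference is taken first; all of
 * the component's clocks are then enabled in order. On clock failure,
 * previously enabled clocks are unwound and the PM reference dropped.
 */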
int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp)
{
	int i, ret;

	/* Only DMA capable components need the pm control */
	if (comp->comp_dev && is_dma_capable(comp->type)) {
		ret = pm_runtime_resume_and_get(comp->comp_dev);
		if (ret < 0) {
			dev_err(dev,
				"Failed to get power, err %d. type:%d id:%d\n",
				ret, comp->type, comp->inner_id);
			return ret;
		}
	}

	for (i = 0; i < comp->clk_num; i++) {
		if (IS_ERR_OR_NULL(comp->clks[i]))
			continue;
		ret = clk_prepare_enable(comp->clks[i]);
		if (ret) {
			dev_err(dev,
				"Failed to enable clk %d. type:%d id:%d\n",
				i, comp->type, comp->inner_id);
			goto err_revert;
		}
	}

	return 0;

err_revert:
	while (--i >= 0) {
		if (IS_ERR_OR_NULL(comp->clks[i]))
			continue;
		clk_disable_unprepare(comp->clks[i]);
	}
	if (comp->comp_dev && is_dma_capable(comp->type))
		pm_runtime_put_sync(comp->comp_dev);

	return ret;
}

void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp)
{
	int i;

	for (i = 0; i < comp->clk_num; i++) {
		if (IS_ERR_OR_NULL(comp->clks[i]))
			continue;
		clk_disable_unprepare(comp->clks[i]);
	}

	if (comp->comp_dev && is_dma_capable(comp->type))
		pm_runtime_put(comp->comp_dev);
}

int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		ret = mdp_comp_clock_on(dev, &comps[i]);
		if (ret)
			return ret;
	}

	return 0;
}

void mdp_comp_clocks_off(struct device *dev, struct mdp_comp *comps, int num)
{
	int i;

	for (i = 0; i < num; i++)
		mdp_comp_clock_off(dev, &comps[i]);
}

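/*
 * Resolve the GCE subsys ID of a component via the mailbox CMDQ
 * helper, using the register-offset index from the per-SoC component
 * table. The subsys ID is what the MM_REG_* helpers use to address
 * the block from GCE command buffers.
 */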
static int mdp_get_subsys_id(struct mdp_dev *mdp, struct device *dev,
			     struct device_node *node, struct mdp_comp *comp)
{
	struct platform_device *comp_pdev;
	struct cmdq_client_reg cmdq_reg;
	int ret = 0;
	int index = 0;

	if (!dev || !node || !comp)
		return -EINVAL;

	comp_pdev = of_find_device_by_node(node);

	if (!comp_pdev) {
		dev_err(dev, "get comp_pdev fail! comp public id=%d, inner id=%d, type=%d\n",
			comp->public_id, comp->inner_id, comp->type);
		return -ENODEV;
	}

	index = mdp->mdp_data->comp_data[comp->public_id].info.dts_reg_ofst;
	ret = cmdq_dev_get_client_reg(&comp_pdev->dev, &cmdq_reg, index);
	if (ret != 0) {
		dev_err(&comp_pdev->dev, "cmdq_dev_get_subsys fail!\n");
		put_device(&comp_pdev->dev);
		return -EINVAL;
	}

	comp->subsys_id = cmdq_reg.subsys;
	dev_dbg(&comp_pdev->dev, "subsys id=%d\n", cmdq_reg.subsys);
	put_device(&comp_pdev->dev);

	return 0;
}

static void __mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
			    struct mdp_comp *comp)
{
	struct resource res;
	phys_addr_t base;
	int index;

	index = mdp->mdp_data->comp_data[comp->public_id].info.dts_reg_ofst;
	if (of_address_to_resource(node, index, &res) < 0)
		base = 0L;
	else
		base = res.start;

	comp->mdp_dev = mdp;
	comp->regs = of_iomap(node, 0);
	comp->reg_base = base;
}

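/*
 * Populate one mdp_comp from the device-tree node and the per-SoC
 * component table: IDs, ops, MMIO mapping, clocks, GCE subsys ID and
 * the SOF/EOF GCE events. DMA-capable components must provide an EOF
 * (frame-done) event; all others fall back to MDP_GCE_NO_EVENT.
 */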
static int mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
			 struct mdp_comp *comp, enum mtk_mdp_comp_id id)
{
	struct device *dev = &mdp->pdev->dev;
	struct platform_device *pdev_c;
	int clk_ofst;
	int i;
	s32 event;

	if (id < 0 || id >= MDP_MAX_COMP_COUNT) {
		dev_err(dev, "Invalid component id %d\n", id);
		return -EINVAL;
	}

	pdev_c = of_find_device_by_node(node);
	if (!pdev_c) {
		dev_warn(dev, "can't find platform device of node:%s\n",
			 node->name);
		return -ENODEV;
	}

	comp->comp_dev = &pdev_c->dev;
	comp->public_id = id;
	comp->type = mdp->mdp_data->comp_data[id].match.type;
	comp->inner_id = mdp->mdp_data->comp_data[id].match.inner_id;
	comp->alias_id = mdp->mdp_data->comp_data[id].match.alias_id;
	comp->ops = mdp_comp_ops[comp->type];
	__mdp_comp_init(mdp, node, comp);

	comp->clk_num = mdp->mdp_data->comp_data[id].info.clk_num;
	comp->clks = devm_kzalloc(dev, sizeof(struct clk *) * comp->clk_num,
				  GFP_KERNEL);
	if (!comp->clks)
		return -ENOMEM;

	clk_ofst = mdp->mdp_data->comp_data[id].info.clk_ofst;

	for (i = 0; i < comp->clk_num; i++) {
		comp->clks[i] = of_clk_get(node, i + clk_ofst);
		if (IS_ERR(comp->clks[i]))
			break;
	}

	mdp_get_subsys_id(mdp, dev, node, comp);

	/* Set GCE SOF event */
	if (is_bypass_gce_event(comp->type) ||
	    of_property_read_u32_index(node, "mediatek,gce-events",
				       MDP_GCE_EVENT_SOF, &event))
		event = MDP_GCE_NO_EVENT;

	comp->gce_event[MDP_GCE_EVENT_SOF] = event;

	/* Set GCE EOF event */
	if (is_dma_capable(comp->type)) {
		if (of_property_read_u32_index(node, "mediatek,gce-events",
					       MDP_GCE_EVENT_EOF, &event)) {
			dev_err(dev, "Component id %d has no EOF\n", id);
			return -EINVAL;
		}
	} else {
		event = MDP_GCE_NO_EVENT;
	}

	comp->gce_event[MDP_GCE_EVENT_EOF] = event;

	return 0;
}

static void mdp_comp_deinit(struct mdp_comp *comp)
{
	if (!comp)
		return;

	if (comp->comp_dev && comp->clks) {
		devm_kfree(&comp->mdp_dev->pdev->dev, comp->clks);
		comp->clks = NULL;
	}

	if (comp->regs)
		iounmap(comp->regs);
}

static struct mdp_comp *mdp_comp_create(struct mdp_dev *mdp,
					struct device_node *node,
					enum mtk_mdp_comp_id id)
{
	struct device *dev = &mdp->pdev->dev;
	struct mdp_comp *comp;
	int ret;

	if (mdp->comp[id])
		return ERR_PTR(-EEXIST);

	comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	ret = mdp_comp_init(mdp, node, comp, id);
	if (ret) {
		devm_kfree(dev, comp);
		return ERR_PTR(ret);
	}
	mdp->comp[id] = comp;
	mdp->comp[id]->mdp_dev = mdp;

	dev_dbg(dev, "%s type:%d alias:%d public id:%d inner id:%d base:%#x regs:%p\n",
		dev->of_node->name, comp->type, comp->alias_id, id, comp->inner_id,
		(u32)comp->reg_base, comp->regs);
	return comp;
}

static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
	struct device *dev = &mdp->pdev->dev;
	struct device_node *node, *parent;
	int ret = 0;

	parent = dev->of_node->parent;

	for_each_child_of_node(parent, node) {
		const struct of_device_id *of_id;
		enum mdp_comp_type type;
		int id, alias_id;
		struct mdp_comp *comp;

		of_id = of_match_node(mdp->mdp_data->mdp_sub_comp_dt_ids, node);
		if (!of_id)
			continue;
		if (!of_device_is_available(node)) {
			dev_dbg(dev, "Skipping disabled sub comp. %pOF\n",
				node);
			continue;
		}

		type = (enum mdp_comp_type)(uintptr_t)of_id->data;
		alias_id = mdp_comp_alias_id[type];
		id = mdp_comp_get_id(mdp, type, alias_id);
		if (id < 0) {
			dev_err(dev,
				"Fail to get sub comp. id: type %d alias %d\n",
				type, alias_id);
			ret = -EINVAL;
			goto err_free_node;
		}
		mdp_comp_alias_id[type]++;

		comp = mdp_comp_create(mdp, node, id);
		if (IS_ERR(comp)) {
			ret = PTR_ERR(comp);
			goto err_free_node;
		}
	}
	return ret;

err_free_node:
	of_node_put(node);
	return ret;
}

void mdp_comp_destroy(struct mdp_dev *mdp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mdp->comp); i++) {
		if (mdp->comp[i]) {
			if (is_dma_capable(mdp->comp[i]->type))
				pm_runtime_disable(mdp->comp[i]->comp_dev);
			mdp_comp_deinit(mdp->comp[i]);
			/* comp was devm-allocated against the core MDP device in mdp_comp_create() */
			devm_kfree(&mdp->pdev->dev, mdp->comp[i]);
			mdp->comp[i] = NULL;
		}
	}
}

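/*
 * Walk the MDP function blocks that are siblings of the core node,
 * create an mdp_comp for every match in mdp_comp_dt_ids, enable
 * runtime PM on the DMA-capable ones, then create the sub-components.
 * Any creation failure tears everything down again.
 */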
int mdp_comp_config(struct mdp_dev *mdp)
{
	struct device *dev = &mdp->pdev->dev;
	struct device_node *node, *parent;
	int ret;

	memset(mdp_comp_alias_id, 0, sizeof(mdp_comp_alias_id));
	p_id = mdp->mdp_data->mdp_plat_id;

	parent = dev->of_node->parent;
	/* Iterate over sibling MDP function blocks */
	for_each_child_of_node(parent, node) {
		const struct of_device_id *of_id;
		enum mdp_comp_type type;
		int id, alias_id;
		struct mdp_comp *comp;

		of_id = of_match_node(mdp_comp_dt_ids, node);
		if (!of_id)
			continue;

		if (!of_device_is_available(node)) {
			dev_dbg(dev, "Skipping disabled component %pOF\n",
				node);
			continue;
		}

		type = (enum mdp_comp_type)(uintptr_t)of_id->data;
		alias_id = mdp_comp_alias_id[type];
		id = mdp_comp_get_id(mdp, type, alias_id);
		if (id < 0) {
			dev_err(dev,
				"Fail to get component id: type %d alias %d\n",
				type, alias_id);
			continue;
		}
		mdp_comp_alias_id[type]++;

		comp = mdp_comp_create(mdp, node, id);
		if (IS_ERR(comp)) {
			ret = PTR_ERR(comp);
			goto err_init_comps;
		}

		/* Only DMA capable components need the pm control */
		if (!is_dma_capable(comp->type))
			continue;
		pm_runtime_enable(comp->comp_dev);
	}

	ret = mdp_comp_sub_create(mdp);
	if (ret)
		goto err_init_comps;

	return 0;

err_init_comps:
	mdp_comp_destroy(mdp);
	return ret;
}

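/*
 * Bind a component context to one frame of work: resolve the public
 * component ID from the IPI parameter block, then attach the input
 * and output image buffers the parameters refer to.
 */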
int mdp_comp_ctx_config(struct mdp_dev *mdp, struct mdp_comp_ctx *ctx,
			const struct img_compparam *param,
			const struct img_ipi_frameparam *frame)
{
	struct device *dev = &mdp->pdev->dev;
	enum mtk_mdp_comp_id public_id = MDP_COMP_NONE;
	u32 arg;
	int i, idx;

	if (!param) {
		dev_err(dev, "Invalid component param\n");
		return -EINVAL;
	}

	if (CFG_CHECK(MT8183, p_id))
		arg = CFG_COMP(MT8183, param, type);
	else
		return -EINVAL;
	public_id = mdp_cfg_get_id_public(mdp, arg);
	if (public_id < 0) {
		dev_err(dev, "Invalid component id %d\n", public_id);
		return -EINVAL;
	}

	ctx->comp = mdp->comp[public_id];
	if (!ctx->comp) {
		dev_err(dev, "Uninitialized component, inner id %d\n", arg);
		return -EINVAL;
	}

	ctx->param = param;
	if (CFG_CHECK(MT8183, p_id))
		arg = CFG_COMP(MT8183, param, input);
	else
		return -EINVAL;
	ctx->input = &frame->inputs[arg];
	if (CFG_CHECK(MT8183, p_id))
		idx = CFG_COMP(MT8183, param, num_outputs);
	else
		return -EINVAL;
	for (i = 0; i < idx; i++) {
		if (CFG_CHECK(MT8183, p_id))
			arg = CFG_COMP(MT8183, param, outputs[i]);
		else
			return -EINVAL;
		ctx->outputs[i] = &frame->outputs[arg];
	}
	return 0;
}