// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include "mtk-mdp3-comp.h"
#include "mtk-mdp3-core.h"
#include "mtk-mdp3-regs.h"

#include "mdp_reg_rdma.h"
#include "mdp_reg_ccorr.h"
#include "mdp_reg_rsz.h"
#include "mdp_reg_wrot.h"
#include "mdp_reg_wdma.h"

static u32 mdp_comp_alias_id[MDP_COMP_TYPE_COUNT];

static inline const struct mdp_platform_config *
__get_plat_cfg(const struct mdp_comp_ctx *ctx)
{
	if (!ctx)
		return NULL;

	return ctx->comp->mdp_dev->mdp_data->mdp_cfg;
}

static s64 get_comp_flag(const struct mdp_comp_ctx *ctx)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);

	if (mdp_cfg && mdp_cfg->rdma_rsz1_sram_sharing)
		if (ctx->comp->id == MDP_COMP_RDMA0)
			return BIT(MDP_COMP_RDMA0) | BIT(MDP_COMP_RSZ1);

	return BIT(ctx->comp->id);
}

static int init_rdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
		struct mdp_comp *prz1 = ctx->comp->mdp_dev->comp[MDP_COMP_RSZ1];

		/* Disable RSZ1 */
		if (ctx->comp->id == MDP_COMP_RDMA0 && prz1)
			MM_REG_WRITE(cmd, subsys_id, prz1->reg_base, PRZ_ENABLE,
				     0x0, BIT(0));
	}

	/* Reset RDMA */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, BIT(0), BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, MDP_RDMA_MON_STA_1, BIT(8), BIT(8));
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, 0x0, BIT(0));
	return 0;
}

static int config_rdma_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	const struct mdp_rdma_data *rdma = &ctx->param->rdma;
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 colorformat = ctx->input->buffer.format.colorformat;
	bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
	bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
		if (block10bit)
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_RESV_DUMMY_0, 0x7, 0x7);
		else
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_RESV_DUMMY_0, 0x0, 0x7);
	}

	/* Setup smi control */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_GMCIF_CON,
		     (7 << 4) +  // burst type to 8
		     (1 << 16),  // enable pre-ultra
		     0x00030071);

	/* Setup source frame info */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_CON, rdma->src_ctrl,
		     0x03C8FE0F);

	if (mdp_cfg)
		if (mdp_cfg->rdma_support_10bit && en_ufo) {
			/* Setup source buffer base */
			MM_REG_WRITE(cmd, subsys_id,
				     base, MDP_RDMA_UFO_DEC_LENGTH_BASE_Y,
				     rdma->ufo_dec_y, 0xFFFFFFFF);
			MM_REG_WRITE(cmd, subsys_id,
				     base, MDP_RDMA_UFO_DEC_LENGTH_BASE_C,
				     rdma->ufo_dec_c, 0xFFFFFFFF);
			/* Set 10bit source frame pitch */
			if (block10bit)
				MM_REG_WRITE(cmd, subsys_id,
					     base, MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
					     rdma->mf_bkgd_in_pxl, 0x001FFFFF);
		}

	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_CON, rdma->control,
		     0x1110);
	/* Setup source buffer base */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, rdma->iova[0],
		     0xFFFFFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, rdma->iova[1],
		     0xFFFFFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, rdma->iova[2],
		     0xFFFFFFFF);
	/* Setup source buffer end */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0,
		     rdma->iova_end[0], 0xFFFFFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1,
		     rdma->iova_end[1], 0xFFFFFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2,
		     rdma->iova_end[2], 0xFFFFFFFF);
	/* Setup source frame pitch */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
		     rdma->mf_bkgd, 0x001FFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
		     rdma->sf_bkgd, 0x001FFFFF);
	/* Setup color transform */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
		     rdma->transform, 0x0F110000);

	return 0;
}

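/*
 * Note on the register helpers used throughout this file: the last two
 * arguments of MM_REG_WRITE() are a value and a bit mask, so only the
 * masked bits of the register are updated through GCE. For example, the
 * pair (BIT(0), BIT(0)) sets bit 0 and leaves the other bits untouched,
 * while (0x0, BIT(0)) clears it.
 */
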
static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_rdma_subfrm *subfrm = &ctx->param->rdma.subfrms[index];
	const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	u32 colorformat = ctx->input->buffer.format.colorformat;
	bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
	bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Enable RDMA */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, BIT(0), BIT(0));

	/* Set Y pixel offset */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0,
		     subfrm->offset[0], 0xFFFFFFFF);

	/* Set 10bit UFO mode */
	if (mdp_cfg)
		if (mdp_cfg->rdma_support_10bit && block10bit && en_ufo)
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_SRC_OFFSET_0_P,
				     subfrm->offset_0_p, 0xFFFFFFFF);

	/* Set U pixel offset */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1,
		     subfrm->offset[1], 0xFFFFFFFF);
	/* Set V pixel offset */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2,
		     subfrm->offset[2], 0xFFFFFFFF);
	/* Set source size */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, subfrm->src,
		     0x1FFF1FFF);
	/* Set target size */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
		     subfrm->clip, 0x1FFF1FFF);
	/* Set crop offset */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
		     subfrm->clip_ofst, 0x003F001F);

	if (mdp_cfg && mdp_cfg->rdma_upsample_repeat_only)
		if ((csf->in.right - csf->in.left + 1) > 320)
			MM_REG_WRITE(cmd, subsys_id, base,
				     MDP_RDMA_RESV_DUMMY_0, BIT(2), BIT(2));

	return 0;
}

static int wait_rdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	if (ctx->comp->alias_id == 0)
		MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
	else
		dev_err(dev, "Do not support RDMA1_DONE event\n");

	/* Disable RDMA */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
	return 0;
}

static const struct mdp_comp_ops rdma_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_rdma,
	.config_frame = config_rdma_frame,
	.config_subfrm = config_rdma_subfrm,
	.wait_comp_event = wait_rdma_event,
};

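/*
 * Each hardware block below follows the same pattern as RDMA above: an
 * init/reset step, per-frame configuration, per-tile (subframe)
 * configuration, and optionally an EOF wait. A rough sketch of how a
 * caller is expected to drive these ops while composing a CMDQ packet
 * (illustrative only; the actual path walk lives outside this file):
 *
 *	comp->ops->init_comp(ctx, cmd);
 *	comp->ops->config_frame(ctx, cmd, compose);
 *	for (index = 0; index < num_subfrms; index++) {
 *		comp->ops->config_subfrm(ctx, cmd, index);
 *		if (comp->ops->wait_comp_event)
 *			comp->ops->wait_comp_event(ctx, cmd);
 *		if (comp->ops->advance_subfrm)
 *			comp->ops->advance_subfrm(ctx, cmd, index);
 *	}
 */
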
static int init_rsz(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset RSZ */
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x10000, BIT(16));
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
	/* Enable RSZ */
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));
	return 0;
}

static int config_rsz_frame(struct mdp_comp_ctx *ctx,
			    struct mdp_cmdq_cmd *cmd,
			    const struct v4l2_rect *compose)
{
	const struct mdp_rsz_data *rsz = &ctx->param->rsz;
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	if (ctx->param->frame.bypass) {
		/* Disable RSZ */
		MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(0));
		return 0;
	}

	MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, rsz->control1,
		     0x03FFFDF3);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, rsz->control2,
		     0x0FFFC290);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP,
		     rsz->coeff_step_x, 0x007FFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP,
		     rsz->coeff_step_y, 0x007FFFFF);
	return 0;
}

static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_rsz_subfrm *subfrm = &ctx->param->rsz.subfrms[index];
	const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, subfrm->control2,
		     0x00003800);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, subfrm->src,
		     0xFFFFFFFF);

	if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample)
		if ((csf->in.right - csf->in.left + 1) <= 16)
			MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1,
				     BIT(27), BIT(27));

	MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
		     csf->luma.left, 0xFFFF);
	MM_REG_WRITE(cmd, subsys_id,
		     base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
		     csf->luma.left_subpix, 0x1FFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
		     csf->luma.top, 0xFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
		     csf->luma.top_subpix, 0x1FFFFF);
	MM_REG_WRITE(cmd, subsys_id,
		     base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
		     csf->chroma.left, 0xFFFF);
	MM_REG_WRITE(cmd, subsys_id,
		     base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
		     csf->chroma.left_subpix, 0x1FFFFF);

	MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, subfrm->clip,
		     0xFFFFFFFF);

	return 0;
}

static int advance_rsz_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);

	if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample) {
		const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
		phys_addr_t base = ctx->comp->reg_base;
		u8 subsys_id = ctx->comp->subsys_id;

		if ((csf->in.right - csf->in.left + 1) <= 16)
			MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, 0x0,
				     BIT(27));
	}

	return 0;
}

static const struct mdp_comp_ops rsz_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_rsz,
	.config_frame = config_rsz_frame,
	.config_subfrm = config_rsz_subfrm,
	.advance_subfrm = advance_rsz_subfrm,
};

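/*
 * WROT soft reset below is a four-step handshake: assert the reset bit,
 * poll the status register until the engine acknowledges, deassert the
 * bit, then poll again until the status clears.
 */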
static int init_wrot(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset WROT */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
	return 0;
}

static int config_wrot_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	const struct mdp_wrot_data *wrot = &ctx->param->wrot;
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Write frame base address */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, wrot->iova[0],
		     0xFFFFFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, wrot->iova[1],
		     0xFFFFFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, wrot->iova[2],
		     0xFFFFFFFF);
	/* Write frame related registers */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, wrot->control,
		     0xF131510F);
	/* Write frame Y pitch */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE, wrot->stride[0],
		     0x0000FFFF);
	/* Write frame UV pitch */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_C, wrot->stride[1],
		     0xFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_V, wrot->stride[2],
		     0xFFFF);
	/* Write matrix control */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAT_CTRL, wrot->mat_ctrl, 0xF3);

	/* Set the fixed ALPHA as 0xFF */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
		     0xFF000000);
	/* Set VIDO_EOL_SEL */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));
	/* Set VIDO_FIFO_TEST */
	if (wrot->fifo_test != 0)
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_FIFO_TEST,
			     wrot->fifo_test, 0xFFF);
	/* Filter enable */
	if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
			     wrot->filter, 0x77);

	return 0;
}

static int config_wrot_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_wrot_subfrm *subfrm = &ctx->param->wrot.subfrms[index];
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Write Y pixel offset */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR,
		     subfrm->offset[0], 0x0FFFFFFF);
	/* Write U pixel offset */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_C,
		     subfrm->offset[1], 0x0FFFFFFF);
	/* Write V pixel offset */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_V,
		     subfrm->offset[2], 0x0FFFFFFF);
	/* Write source size */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_IN_SIZE, subfrm->src,
		     0x1FFF1FFF);
	/* Write target size */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_TAR_SIZE, subfrm->clip,
		     0x1FFF1FFF);
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_CROP_OFST, subfrm->clip_ofst,
		     0x1FFF1FFF);

	MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
		     subfrm->main_buf, 0x1FFF7F00);

	/* Enable WROT */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, BIT(0), BIT(0));

	return 0;
}

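/*
 * As with RDMA, only the first WROT instance (alias 0) has a usable
 * frame-done GCE event here; other aliases log an error instead of
 * waiting.
 */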
static int wait_wrot_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
	struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	if (ctx->comp->alias_id == 0)
		MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
	else
		dev_err(dev, "Do not support WROT1_DONE event\n");

	if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
		MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
			     0x77);

	/* Disable WROT */
	MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, 0x0, BIT(0));

	return 0;
}

static const struct mdp_comp_ops wrot_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_wrot,
	.config_frame = config_wrot_frame,
	.config_subfrm = config_wrot_subfrm,
	.wait_comp_event = wait_wrot_event,
};

static int init_wdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Reset WDMA */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, BIT(0), BIT(0));
	MM_REG_POLL(cmd, subsys_id, base, WDMA_FLOW_CTRL_DBG, BIT(0), BIT(0));
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, 0x0, BIT(0));
	return 0;
}

static int config_wdma_frame(struct mdp_comp_ctx *ctx,
			     struct mdp_cmdq_cmd *cmd,
			     const struct v4l2_rect *compose)
{
	const struct mdp_wdma_data *wdma = &ctx->param->wdma;
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	MM_REG_WRITE(cmd, subsys_id, base, WDMA_BUF_CON2, 0x10101050,
		     0xFFFFFFFF);

	/* Setup frame information */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_CFG, wdma->wdma_cfg,
		     0x0F01B8F0);
	/* Setup frame base address */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, wdma->iova[0],
		     0xFFFFFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR, wdma->iova[1],
		     0xFFFFFFFF);
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR, wdma->iova[2],
		     0xFFFFFFFF);
	/* Setup Y pitch */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_W_IN_BYTE,
		     wdma->w_in_byte, 0x0000FFFF);
	/* Setup UV pitch */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_UV_PITCH,
		     wdma->uv_stride, 0x0000FFFF);
	/* Set the fixed ALPHA as 0xFF */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_ALPHA, 0x800000FF,
		     0x800000FF);

	return 0;
}

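/*
 * Per-tile WDMA programming mirrors the RDMA side: the Y/U/V address
 * offsets position the current tile in the destination buffer, then the
 * source/clip sizes bound the transfer before the engine is enabled.
 */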
static int config_wdma_subfrm(struct mdp_comp_ctx *ctx,
			      struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct mdp_wdma_subfrm *subfrm = &ctx->param->wdma.subfrms[index];
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* Write Y pixel offset */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR_OFFSET,
		     subfrm->offset[0], 0x0FFFFFFF);
	/* Write U pixel offset */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR_OFFSET,
		     subfrm->offset[1], 0x0FFFFFFF);
	/* Write V pixel offset */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR_OFFSET,
		     subfrm->offset[2], 0x0FFFFFFF);
	/* Write source size */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_SRC_SIZE, subfrm->src,
		     0x3FFF3FFF);
	/* Write target size */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_SIZE, subfrm->clip,
		     0x3FFF3FFF);
	/* Write clip offset */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_COORD, subfrm->clip_ofst,
		     0x3FFF3FFF);

	/* Enable WDMA */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0));

	return 0;
}

static int wait_wdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
	/* Disable WDMA */
	MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, 0x0, BIT(0));
	return 0;
}

static const struct mdp_comp_ops wdma_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_wdma,
	.config_frame = config_wdma_frame,
	.config_subfrm = config_wdma_subfrm,
	.wait_comp_event = wait_wdma_event,
};

static int init_ccorr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;

	/* CCORR enable */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_EN, BIT(0), BIT(0));
	/* Relay mode */
	MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_CFG, BIT(0), BIT(0));
	return 0;
}

static int config_ccorr_subfrm(struct mdp_comp_ctx *ctx,
			       struct mdp_cmdq_cmd *cmd, u32 index)
{
	const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
	phys_addr_t base = ctx->comp->reg_base;
	u8 subsys_id = ctx->comp->subsys_id;
	u32 hsize, vsize;

	hsize = csf->in.right - csf->in.left + 1;
	vsize = csf->in.bottom - csf->in.top + 1;
	MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_SIZE,
		     (hsize << 16) + (vsize << 0), 0x1FFF1FFF);
	return 0;
}

static const struct mdp_comp_ops ccorr_ops = {
	.get_comp_flag = get_comp_flag,
	.init_comp = init_ccorr,
	.config_subfrm = config_ccorr_subfrm,
};

static const struct mdp_comp_ops *mdp_comp_ops[MDP_COMP_TYPE_COUNT] = {
	[MDP_COMP_TYPE_RDMA] = &rdma_ops,
	[MDP_COMP_TYPE_RSZ] = &rsz_ops,
	[MDP_COMP_TYPE_WROT] = &wrot_ops,
	[MDP_COMP_TYPE_WDMA] = &wdma_ops,
	[MDP_COMP_TYPE_CCORR] = &ccorr_ops,
};

struct mdp_comp_match {
	enum mdp_comp_type type;
	u32 alias_id;
};

static const struct mdp_comp_match mdp_comp_matches[MDP_MAX_COMP_COUNT] = {
	[MDP_COMP_WPEI] = { MDP_COMP_TYPE_WPEI, 0 },
	[MDP_COMP_WPEO] = { MDP_COMP_TYPE_EXTO, 2 },
	[MDP_COMP_WPEI2] = { MDP_COMP_TYPE_WPEI, 1 },
	[MDP_COMP_WPEO2] = { MDP_COMP_TYPE_EXTO, 3 },
	[MDP_COMP_ISP_IMGI] = { MDP_COMP_TYPE_IMGI, 0 },
	[MDP_COMP_ISP_IMGO] = { MDP_COMP_TYPE_EXTO, 0 },
	[MDP_COMP_ISP_IMG2O] = { MDP_COMP_TYPE_EXTO, 1 },

	[MDP_COMP_CAMIN] = { MDP_COMP_TYPE_DL_PATH, 0 },
	[MDP_COMP_CAMIN2] = { MDP_COMP_TYPE_DL_PATH, 1 },
	[MDP_COMP_RDMA0] = { MDP_COMP_TYPE_RDMA, 0 },
	[MDP_COMP_CCORR0] = { MDP_COMP_TYPE_CCORR, 0 },
	[MDP_COMP_RSZ0] = { MDP_COMP_TYPE_RSZ, 0 },
	[MDP_COMP_RSZ1] = { MDP_COMP_TYPE_RSZ, 1 },
	[MDP_COMP_PATH0_SOUT] = { MDP_COMP_TYPE_PATH, 0 },
	[MDP_COMP_PATH1_SOUT] = { MDP_COMP_TYPE_PATH, 1 },
	[MDP_COMP_WROT0] = { MDP_COMP_TYPE_WROT, 0 },
	[MDP_COMP_WDMA] = { MDP_COMP_TYPE_WDMA, 0 },
};

static const struct of_device_id mdp_comp_dt_ids[] = {
	{
		.compatible = "mediatek,mt8183-mdp3-rdma",
		.data = (void *)MDP_COMP_TYPE_RDMA,
	}, {
		.compatible = "mediatek,mt8183-mdp3-ccorr",
		.data = (void *)MDP_COMP_TYPE_CCORR,
	}, {
		.compatible = "mediatek,mt8183-mdp3-rsz",
		.data = (void *)MDP_COMP_TYPE_RSZ,
	}, {
		.compatible = "mediatek,mt8183-mdp3-wrot",
		.data = (void *)MDP_COMP_TYPE_WROT,
	}, {
		.compatible = "mediatek,mt8183-mdp3-wdma",
		.data = (void *)MDP_COMP_TYPE_WDMA,
	},
	{}
};

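/*
 * Sub-components do not have device nodes of their own: the WDMA and
 * WROT nodes matched above are matched again below so that the PATH
 * routing sub-components (PATH0_SOUT/PATH1_SOUT) attach to those same
 * nodes.
 */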
static const struct of_device_id mdp_sub_comp_dt_ids[] = {
	{
		.compatible = "mediatek,mt8183-mdp3-wdma",
		.data = (void *)MDP_COMP_TYPE_PATH,
	}, {
		.compatible = "mediatek,mt8183-mdp3-wrot",
		.data = (void *)MDP_COMP_TYPE_PATH,
	},
	{}
};

/* Used to describe the item order in MDP property */
struct mdp_comp_info {
	u32 clk_num;
	u32 clk_ofst;
	u32 dts_reg_ofst;
};

static const struct mdp_comp_info mdp_comp_dt_info[MDP_MAX_COMP_COUNT] = {
	[MDP_COMP_RDMA0] = {2, 0, 0},
	[MDP_COMP_RSZ0] = {1, 0, 0},
	[MDP_COMP_WROT0] = {1, 0, 0},
	[MDP_COMP_WDMA] = {1, 0, 0},
	[MDP_COMP_CCORR0] = {1, 0, 0},
};

static inline bool is_dma_capable(const enum mdp_comp_type type)
{
	return (type == MDP_COMP_TYPE_RDMA ||
		type == MDP_COMP_TYPE_WROT ||
		type == MDP_COMP_TYPE_WDMA);
}

static inline bool is_bypass_gce_event(const enum mdp_comp_type type)
{
	/*
	 * Subcomponent PATH is only used for the direction of data flow and
	 * does not need to wait for GCE event.
	 */
	return (type == MDP_COMP_TYPE_PATH);
}

static int mdp_comp_get_id(enum mdp_comp_type type, int alias_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mdp_comp_matches); i++)
		if (mdp_comp_matches[i].type == type &&
		    mdp_comp_matches[i].alias_id == alias_id)
			return i;
	return -ENODEV;
}

int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp)
{
	int i, ret;

	if (comp->comp_dev) {
		ret = pm_runtime_resume_and_get(comp->comp_dev);
		if (ret < 0) {
			dev_err(dev,
				"Failed to get power, err %d. type:%d id:%d\n",
				ret, comp->type, comp->id);
			return ret;
		}
	}

	for (i = 0; i < ARRAY_SIZE(comp->clks); i++) {
		if (IS_ERR_OR_NULL(comp->clks[i]))
			continue;
		ret = clk_prepare_enable(comp->clks[i]);
		if (ret) {
			dev_err(dev,
				"Failed to enable clk %d. type:%d id:%d\n",
				i, comp->type, comp->id);
			goto err_revert;
		}
	}

	return 0;

err_revert:
	while (--i >= 0) {
		if (IS_ERR_OR_NULL(comp->clks[i]))
			continue;
		clk_disable_unprepare(comp->clks[i]);
	}
	if (comp->comp_dev)
		pm_runtime_put_sync(comp->comp_dev);

	return ret;
}

void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(comp->clks); i++) {
		if (IS_ERR_OR_NULL(comp->clks[i]))
			continue;
		clk_disable_unprepare(comp->clks[i]);
	}

	if (comp->comp_dev)
		pm_runtime_put(comp->comp_dev);
}

int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		ret = mdp_comp_clock_on(dev, &comps[i]);
		if (ret)
			return ret;
	}

	return 0;
}

void mdp_comp_clocks_off(struct device *dev, struct mdp_comp *comps, int num)
{
	int i;

	for (i = 0; i < num; i++)
		mdp_comp_clock_off(dev, &comps[i]);
}

static int mdp_get_subsys_id(struct device *dev, struct device_node *node,
			     struct mdp_comp *comp)
{
	struct platform_device *comp_pdev;
	struct cmdq_client_reg cmdq_reg;
	int ret = 0;
	int index = 0;

	if (!dev || !node || !comp)
		return -EINVAL;

	comp_pdev = of_find_device_by_node(node);

	if (!comp_pdev) {
		dev_err(dev, "Failed to get comp_pdev, comp id=%d type=%d\n",
			comp->id, comp->type);
		return -ENODEV;
	}

	index = mdp_comp_dt_info[comp->id].dts_reg_ofst;
	ret = cmdq_dev_get_client_reg(&comp_pdev->dev, &cmdq_reg, index);
	if (ret != 0) {
		dev_err(&comp_pdev->dev, "cmdq_dev_get_client_reg fail!\n");
		return -EINVAL;
	}

	comp->subsys_id = cmdq_reg.subsys;
	dev_dbg(&comp_pdev->dev, "subsys id=%d\n", cmdq_reg.subsys);

	return 0;
}

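/*
 * The subsys id stored by mdp_get_subsys_id() comes from the component's
 * CMDQ client register description in the device tree (parsed by
 * cmdq_dev_get_client_reg()); GCE uses it to address the block's
 * registers in MM_REG_WRITE() and MM_REG_POLL().
 */
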
static void __mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
			    struct mdp_comp *comp)
{
	struct resource res;
	phys_addr_t base;
	int index = mdp_comp_dt_info[comp->id].dts_reg_ofst;

	if (of_address_to_resource(node, index, &res) < 0)
		base = 0L;
	else
		base = res.start;

	comp->mdp_dev = mdp;
	comp->regs = of_iomap(node, 0);
	comp->reg_base = base;
}

static int mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
			 struct mdp_comp *comp, enum mtk_mdp_comp_id id)
{
	struct device *dev = &mdp->pdev->dev;
	int clk_num;
	int clk_ofst;
	int i;
	s32 event;

	if (id < 0 || id >= MDP_MAX_COMP_COUNT) {
		dev_err(dev, "Invalid component id %d\n", id);
		return -EINVAL;
	}

	comp->id = id;
	comp->type = mdp_comp_matches[id].type;
	comp->alias_id = mdp_comp_matches[id].alias_id;
	comp->ops = mdp_comp_ops[comp->type];
	__mdp_comp_init(mdp, node, comp);

	clk_num = mdp_comp_dt_info[id].clk_num;
	clk_ofst = mdp_comp_dt_info[id].clk_ofst;

	for (i = 0; i < clk_num; i++) {
		comp->clks[i] = of_clk_get(node, i + clk_ofst);
		if (IS_ERR(comp->clks[i]))
			break;
	}

	mdp_get_subsys_id(dev, node, comp);

	/* Set GCE SOF event */
	if (is_bypass_gce_event(comp->type) ||
	    of_property_read_u32_index(node, "mediatek,gce-events",
				       MDP_GCE_EVENT_SOF, &event))
		event = MDP_GCE_NO_EVENT;

	comp->gce_event[MDP_GCE_EVENT_SOF] = event;

	/* Set GCE EOF event */
	if (is_dma_capable(comp->type)) {
		if (of_property_read_u32_index(node, "mediatek,gce-events",
					       MDP_GCE_EVENT_EOF, &event)) {
			dev_err(dev, "Component id %d has no EOF\n", id);
			return -EINVAL;
		}
	} else {
		event = MDP_GCE_NO_EVENT;
	}

	comp->gce_event[MDP_GCE_EVENT_EOF] = event;

	return 0;
}

static void mdp_comp_deinit(struct mdp_comp *comp)
{
	if (!comp)
		return;

	if (comp->regs)
		iounmap(comp->regs);
}

static struct mdp_comp *mdp_comp_create(struct mdp_dev *mdp,
					struct device_node *node,
					enum mtk_mdp_comp_id id)
{
	struct device *dev = &mdp->pdev->dev;
	struct mdp_comp *comp;
	int ret;

	if (mdp->comp[id])
		return ERR_PTR(-EEXIST);

	comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	ret = mdp_comp_init(mdp, node, comp, id);
	if (ret) {
		devm_kfree(dev, comp);
		return ERR_PTR(ret);
	}
	mdp->comp[id] = comp;
	mdp->comp[id]->mdp_dev = mdp;

	dev_dbg(dev, "%s type:%d alias:%d id:%d base:%#x regs:%p\n",
		dev->of_node->name, comp->type, comp->alias_id, id,
		(u32)comp->reg_base, comp->regs);
	return comp;
}

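/*
 * Alias ids disambiguate multiple instances of the same component type
 * (e.g. RSZ0 vs. RSZ1): mdp_comp_alias_id[] counts instances per type in
 * DT probe order, and mdp_comp_get_id() maps (type, alias) back to a
 * canonical mtk_mdp_comp_id.
 */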
static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
	struct device *dev = &mdp->pdev->dev;
	struct device_node *node, *parent;

	parent = dev->of_node->parent;

	for_each_child_of_node(parent, node) {
		const struct of_device_id *of_id;
		enum mdp_comp_type type;
		int id, alias_id;
		struct mdp_comp *comp;

		of_id = of_match_node(mdp_sub_comp_dt_ids, node);
		if (!of_id)
			continue;
		if (!of_device_is_available(node)) {
			dev_dbg(dev, "Skipping disabled sub comp. %pOF\n",
				node);
			continue;
		}

		type = (enum mdp_comp_type)(uintptr_t)of_id->data;
		alias_id = mdp_comp_alias_id[type];
		id = mdp_comp_get_id(type, alias_id);
		if (id < 0) {
			dev_err(dev,
				"Fail to get sub comp. id: type %d alias %d\n",
				type, alias_id);
			/* Drop the reference held by the iterator */
			of_node_put(node);
			return -EINVAL;
		}
		mdp_comp_alias_id[type]++;

		comp = mdp_comp_create(mdp, node, id);
		if (IS_ERR(comp)) {
			of_node_put(node);
			return PTR_ERR(comp);
		}
	}

	return 0;
}

void mdp_comp_destroy(struct mdp_dev *mdp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mdp->comp); i++) {
		if (mdp->comp[i]) {
			/* Only DMA-capable components have a comp_dev */
			if (mdp->comp[i]->comp_dev)
				pm_runtime_disable(mdp->comp[i]->comp_dev);
			mdp_comp_deinit(mdp->comp[i]);
			/* Components are devm-allocated on the mdp device */
			devm_kfree(&mdp->pdev->dev, mdp->comp[i]);
			mdp->comp[i] = NULL;
		}
	}
}

int mdp_comp_config(struct mdp_dev *mdp)
{
	struct device *dev = &mdp->pdev->dev;
	struct device_node *node, *parent;
	struct platform_device *pdev;
	int ret;

	memset(mdp_comp_alias_id, 0, sizeof(mdp_comp_alias_id));

	parent = dev->of_node->parent;
	/* Iterate over sibling MDP function blocks */
	for_each_child_of_node(parent, node) {
		const struct of_device_id *of_id;
		enum mdp_comp_type type;
		int id, alias_id;
		struct mdp_comp *comp;

		of_id = of_match_node(mdp_comp_dt_ids, node);
		if (!of_id)
			continue;

		if (!of_device_is_available(node)) {
			dev_dbg(dev, "Skipping disabled component %pOF\n",
				node);
			continue;
		}

		type = (enum mdp_comp_type)(uintptr_t)of_id->data;
		alias_id = mdp_comp_alias_id[type];
		id = mdp_comp_get_id(type, alias_id);
		if (id < 0) {
			dev_err(dev,
				"Fail to get component id: type %d alias %d\n",
				type, alias_id);
			continue;
		}
		mdp_comp_alias_id[type]++;

		comp = mdp_comp_create(mdp, node, id);
		if (IS_ERR(comp)) {
			ret = PTR_ERR(comp);
			/* Drop the reference held by the iterator */
			of_node_put(node);
			goto err_init_comps;
		}

		/* Only DMA capable components need the pm control */
		comp->comp_dev = NULL;
		if (!is_dma_capable(comp->type))
			continue;

		pdev = of_find_device_by_node(node);
		if (!pdev) {
			dev_warn(dev, "can't find platform device of node:%s\n",
				 node->name);
			ret = -ENODEV;
			of_node_put(node);
			goto err_init_comps;
		}

		comp->comp_dev = &pdev->dev;
		pm_runtime_enable(comp->comp_dev);
	}

	ret = mdp_comp_sub_create(mdp);
	if (ret)
		goto err_init_comps;

	return 0;

err_init_comps:
	mdp_comp_destroy(mdp);
	return ret;
}

int mdp_comp_ctx_config(struct mdp_dev *mdp, struct mdp_comp_ctx *ctx,
			const struct img_compparam *param,
			const struct img_ipi_frameparam *frame)
{
	struct device *dev = &mdp->pdev->dev;
	int i;

	if (param->type < 0 || param->type >= MDP_MAX_COMP_COUNT) {
		dev_err(dev, "Invalid component id %d", param->type);
		return -EINVAL;
	}

	ctx->comp = mdp->comp[param->type];
	if (!ctx->comp) {
		dev_err(dev, "Uninit component id %d", param->type);
		return -EINVAL;
	}

	ctx->param = param;
	ctx->input = &frame->inputs[param->input];
	for (i = 0; i < param->num_outputs; i++)
		ctx->outputs[i] = &frame->outputs[param->outputs[i]];
	return 0;
}