1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 4 * Author:Mark Yao <mark.yao@rock-chips.com> 5 */ 6 7 #include <linux/clk.h> 8 #include <linux/component.h> 9 #include <linux/delay.h> 10 #include <linux/iopoll.h> 11 #include <linux/kernel.h> 12 #include <linux/module.h> 13 #include <linux/of.h> 14 #include <linux/of_device.h> 15 #include <linux/overflow.h> 16 #include <linux/platform_device.h> 17 #include <linux/pm_runtime.h> 18 #include <linux/reset.h> 19 20 #include <drm/drm.h> 21 #include <drm/drm_atomic.h> 22 #include <drm/drm_atomic_uapi.h> 23 #include <drm/drm_blend.h> 24 #include <drm/drm_crtc.h> 25 #include <drm/drm_flip_work.h> 26 #include <drm/drm_fourcc.h> 27 #include <drm/drm_framebuffer.h> 28 #include <drm/drm_gem_atomic_helper.h> 29 #include <drm/drm_gem_framebuffer_helper.h> 30 #include <drm/drm_probe_helper.h> 31 #include <drm/drm_self_refresh_helper.h> 32 #include <drm/drm_vblank.h> 33 34 #ifdef CONFIG_DRM_ANALOGIX_DP 35 #include <drm/bridge/analogix_dp.h> 36 #endif 37 38 #include "rockchip_drm_drv.h" 39 #include "rockchip_drm_gem.h" 40 #include "rockchip_drm_fb.h" 41 #include "rockchip_drm_vop.h" 42 #include "rockchip_rgb.h" 43 44 #define VOP_WIN_SET(vop, win, name, v) \ 45 vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name) 46 #define VOP_SCL_SET(vop, win, name, v) \ 47 vop_reg_set(vop, &win->phy->scl->name, win->base, ~0, v, #name) 48 #define VOP_SCL_SET_EXT(vop, win, name, v) \ 49 vop_reg_set(vop, &win->phy->scl->ext->name, \ 50 win->base, ~0, v, #name) 51 52 #define VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, name, v) \ 53 do { \ 54 if (win_yuv2yuv && win_yuv2yuv->name.mask) \ 55 vop_reg_set(vop, &win_yuv2yuv->name, 0, ~0, v, #name); \ 56 } while (0) 57 58 #define VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop, win_yuv2yuv, name, v) \ 59 do { \ 60 if (win_yuv2yuv && win_yuv2yuv->phy->name.mask) \ 61 vop_reg_set(vop, &win_yuv2yuv->phy->name, win_yuv2yuv->base, ~0, v, #name); \ 62 } while (0) 63 64 #define VOP_INTR_SET_MASK(vop, name, mask, v) \ 65 vop_reg_set(vop, &vop->data->intr->name, 0, mask, v, #name) 66 67 #define VOP_REG_SET(vop, group, name, v) \ 68 vop_reg_set(vop, &vop->data->group->name, 0, ~0, v, #name) 69 70 #define VOP_INTR_SET_TYPE(vop, name, type, v) \ 71 do { \ 72 int i, reg = 0, mask = 0; \ 73 for (i = 0; i < vop->data->intr->nintrs; i++) { \ 74 if (vop->data->intr->intrs[i] & type) { \ 75 reg |= (v) << i; \ 76 mask |= 1 << i; \ 77 } \ 78 } \ 79 VOP_INTR_SET_MASK(vop, name, mask, reg); \ 80 } while (0) 81 #define VOP_INTR_GET_TYPE(vop, name, type) \ 82 vop_get_intr_type(vop, &vop->data->intr->name, type) 83 84 #define VOP_WIN_GET(vop, win, name) \ 85 vop_read_reg(vop, win->base, &win->phy->name) 86 87 #define VOP_WIN_HAS_REG(win, name) \ 88 (!!(win->phy->name.mask)) 89 90 #define VOP_WIN_GET_YRGBADDR(vop, win) \ 91 vop_readl(vop, win->base + win->phy->yrgb_mst.offset) 92 93 #define VOP_WIN_TO_INDEX(vop_win) \ 94 ((vop_win) - (vop_win)->vop->win) 95 96 #define VOP_AFBC_SET(vop, name, v) \ 97 do { \ 98 if ((vop)->data->afbc) \ 99 vop_reg_set((vop), &(vop)->data->afbc->name, \ 100 0, ~0, v, #name); \ 101 } while (0) 102 103 #define to_vop(x) container_of(x, struct vop, crtc) 104 #define to_vop_win(x) container_of(x, struct vop_win, base) 105 106 #define AFBC_FMT_RGB565 0x0 107 #define AFBC_FMT_U8U8U8U8 0x5 108 #define AFBC_FMT_U8U8U8 0x4 109 110 #define AFBC_TILE_16x16 BIT(4) 111 112 /* 113 * The coefficients of the following matrix are all fixed points. 114 * The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets. 115 * They are all represented in two's complement. 116 */ 117 static const uint32_t bt601_yuv2rgb[] = { 118 0x4A8, 0x0, 0x662, 119 0x4A8, 0x1E6F, 0x1CBF, 120 0x4A8, 0x812, 0x0, 121 0x321168, 0x0877CF, 0x2EB127 122 }; 123 124 enum vop_pending { 125 VOP_PENDING_FB_UNREF, 126 }; 127 128 struct vop_win { 129 struct drm_plane base; 130 const struct vop_win_data *data; 131 const struct vop_win_yuv2yuv_data *yuv2yuv_data; 132 struct vop *vop; 133 }; 134 135 struct rockchip_rgb; 136 struct vop { 137 struct drm_crtc crtc; 138 struct device *dev; 139 struct drm_device *drm_dev; 140 bool is_enabled; 141 142 struct completion dsp_hold_completion; 143 unsigned int win_enabled; 144 145 /* protected by dev->event_lock */ 146 struct drm_pending_vblank_event *event; 147 148 struct drm_flip_work fb_unref_work; 149 unsigned long pending; 150 151 struct completion line_flag_completion; 152 153 const struct vop_data *data; 154 155 uint32_t *regsbak; 156 void __iomem *regs; 157 void __iomem *lut_regs; 158 159 /* physical map length of vop register */ 160 uint32_t len; 161 162 /* one time only one process allowed to config the register */ 163 spinlock_t reg_lock; 164 /* lock vop irq reg */ 165 spinlock_t irq_lock; 166 /* protects crtc enable/disable */ 167 struct mutex vop_lock; 168 169 unsigned int irq; 170 171 /* vop AHP clk */ 172 struct clk *hclk; 173 /* vop dclk */ 174 struct clk *dclk; 175 /* vop share memory frequency */ 176 struct clk *aclk; 177 178 /* vop dclk reset */ 179 struct reset_control *dclk_rst; 180 181 /* optional internal rgb encoder */ 182 struct rockchip_rgb *rgb; 183 184 struct vop_win win[]; 185 }; 186 187 static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) 188 { 189 writel(v, vop->regs + offset); 190 vop->regsbak[offset >> 2] = v; 191 } 192 193 static inline uint32_t vop_readl(struct vop *vop, uint32_t offset) 194 { 195 return readl(vop->regs + offset); 196 } 197 198 static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base, 199 const struct vop_reg *reg) 200 { 201 return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask; 202 } 203 204 static void vop_reg_set(struct vop *vop, const struct vop_reg *reg, 205 uint32_t _offset, uint32_t _mask, uint32_t v, 206 const char *reg_name) 207 { 208 int offset, mask, shift; 209 210 if (!reg || !reg->mask) { 211 DRM_DEV_DEBUG(vop->dev, "Warning: not support %s\n", reg_name); 212 return; 213 } 214 215 offset = reg->offset + _offset; 216 mask = reg->mask & _mask; 217 shift = reg->shift; 218 219 if (reg->write_mask) { 220 v = ((v << shift) & 0xffff) | (mask << (shift + 16)); 221 } else { 222 uint32_t cached_val = vop->regsbak[offset >> 2]; 223 224 v = (cached_val & ~(mask << shift)) | ((v & mask) << shift); 225 vop->regsbak[offset >> 2] = v; 226 } 227 228 if (reg->relaxed) 229 writel_relaxed(v, vop->regs + offset); 230 else 231 writel(v, vop->regs + offset); 232 } 233 234 static inline uint32_t vop_get_intr_type(struct vop *vop, 235 const struct vop_reg *reg, int type) 236 { 237 uint32_t i, ret = 0; 238 uint32_t regs = vop_read_reg(vop, 0, reg); 239 240 for (i = 0; i < vop->data->intr->nintrs; i++) { 241 if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i)) 242 ret |= vop->data->intr->intrs[i]; 243 } 244 245 return ret; 246 } 247 248 static inline void vop_cfg_done(struct vop *vop) 249 { 250 VOP_REG_SET(vop, common, cfg_done, 1); 251 } 252 253 static bool has_rb_swapped(uint32_t format) 254 { 255 switch (format) { 256 case DRM_FORMAT_XBGR8888: 257 case DRM_FORMAT_ABGR8888: 258 case DRM_FORMAT_BGR888: 259 case DRM_FORMAT_BGR565: 260 return true; 261 default: 262 return false; 263 } 264 } 265 266 static bool has_uv_swapped(uint32_t format) 267 { 268 switch (format) { 269 case DRM_FORMAT_NV21: 270 case DRM_FORMAT_NV61: 271 case DRM_FORMAT_NV42: 272 return true; 273 default: 274 return false; 275 } 276 } 277 278 static enum vop_data_format vop_convert_format(uint32_t format) 279 { 280 switch (format) { 281 case DRM_FORMAT_XRGB8888: 282 case DRM_FORMAT_ARGB8888: 283 case DRM_FORMAT_XBGR8888: 284 case DRM_FORMAT_ABGR8888: 285 return VOP_FMT_ARGB8888; 286 case DRM_FORMAT_RGB888: 287 case DRM_FORMAT_BGR888: 288 return VOP_FMT_RGB888; 289 case DRM_FORMAT_RGB565: 290 case DRM_FORMAT_BGR565: 291 return VOP_FMT_RGB565; 292 case DRM_FORMAT_NV12: 293 case DRM_FORMAT_NV21: 294 return VOP_FMT_YUV420SP; 295 case DRM_FORMAT_NV16: 296 case DRM_FORMAT_NV61: 297 return VOP_FMT_YUV422SP; 298 case DRM_FORMAT_NV24: 299 case DRM_FORMAT_NV42: 300 return VOP_FMT_YUV444SP; 301 default: 302 DRM_ERROR("unsupported format[%08x]\n", format); 303 return -EINVAL; 304 } 305 } 306 307 static int vop_convert_afbc_format(uint32_t format) 308 { 309 switch (format) { 310 case DRM_FORMAT_XRGB8888: 311 case DRM_FORMAT_ARGB8888: 312 case DRM_FORMAT_XBGR8888: 313 case DRM_FORMAT_ABGR8888: 314 return AFBC_FMT_U8U8U8U8; 315 case DRM_FORMAT_RGB888: 316 case DRM_FORMAT_BGR888: 317 return AFBC_FMT_U8U8U8; 318 case DRM_FORMAT_RGB565: 319 case DRM_FORMAT_BGR565: 320 return AFBC_FMT_RGB565; 321 /* either of the below should not be reachable */ 322 default: 323 DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format); 324 return -EINVAL; 325 } 326 327 return -EINVAL; 328 } 329 330 static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src, 331 uint32_t dst, bool is_horizontal, 332 int vsu_mode, int *vskiplines) 333 { 334 uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT; 335 336 if (vskiplines) 337 *vskiplines = 0; 338 339 if (is_horizontal) { 340 if (mode == SCALE_UP) 341 val = GET_SCL_FT_BIC(src, dst); 342 else if (mode == SCALE_DOWN) 343 val = GET_SCL_FT_BILI_DN(src, dst); 344 } else { 345 if (mode == SCALE_UP) { 346 if (vsu_mode == SCALE_UP_BIL) 347 val = GET_SCL_FT_BILI_UP(src, dst); 348 else 349 val = GET_SCL_FT_BIC(src, dst); 350 } else if (mode == SCALE_DOWN) { 351 if (vskiplines) { 352 *vskiplines = scl_get_vskiplines(src, dst); 353 val = scl_get_bili_dn_vskip(src, dst, 354 *vskiplines); 355 } else { 356 val = GET_SCL_FT_BILI_DN(src, dst); 357 } 358 } 359 } 360 361 return val; 362 } 363 364 static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win, 365 uint32_t src_w, uint32_t src_h, uint32_t dst_w, 366 uint32_t dst_h, const struct drm_format_info *info) 367 { 368 uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode; 369 uint16_t cbcr_hor_scl_mode = SCALE_NONE; 370 uint16_t cbcr_ver_scl_mode = SCALE_NONE; 371 bool is_yuv = false; 372 uint16_t cbcr_src_w = src_w / info->hsub; 373 uint16_t cbcr_src_h = src_h / info->vsub; 374 uint16_t vsu_mode; 375 uint16_t lb_mode; 376 uint32_t val; 377 int vskiplines; 378 379 if (info->is_yuv) 380 is_yuv = true; 381 382 if (dst_w > 3840) { 383 DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n"); 384 return; 385 } 386 387 if (!win->phy->scl->ext) { 388 VOP_SCL_SET(vop, win, scale_yrgb_x, 389 scl_cal_scale2(src_w, dst_w)); 390 VOP_SCL_SET(vop, win, scale_yrgb_y, 391 scl_cal_scale2(src_h, dst_h)); 392 if (is_yuv) { 393 VOP_SCL_SET(vop, win, scale_cbcr_x, 394 scl_cal_scale2(cbcr_src_w, dst_w)); 395 VOP_SCL_SET(vop, win, scale_cbcr_y, 396 scl_cal_scale2(cbcr_src_h, dst_h)); 397 } 398 return; 399 } 400 401 yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w); 402 yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h); 403 404 if (is_yuv) { 405 cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w); 406 cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h); 407 if (cbcr_hor_scl_mode == SCALE_DOWN) 408 lb_mode = scl_vop_cal_lb_mode(dst_w, true); 409 else 410 lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true); 411 } else { 412 if (yrgb_hor_scl_mode == SCALE_DOWN) 413 lb_mode = scl_vop_cal_lb_mode(dst_w, false); 414 else 415 lb_mode = scl_vop_cal_lb_mode(src_w, false); 416 } 417 418 VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode); 419 if (lb_mode == LB_RGB_3840X2) { 420 if (yrgb_ver_scl_mode != SCALE_NONE) { 421 DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n"); 422 return; 423 } 424 if (cbcr_ver_scl_mode != SCALE_NONE) { 425 DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n"); 426 return; 427 } 428 vsu_mode = SCALE_UP_BIL; 429 } else if (lb_mode == LB_RGB_2560X4) { 430 vsu_mode = SCALE_UP_BIL; 431 } else { 432 vsu_mode = SCALE_UP_BIC; 433 } 434 435 val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w, 436 true, 0, NULL); 437 VOP_SCL_SET(vop, win, scale_yrgb_x, val); 438 val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h, 439 false, vsu_mode, &vskiplines); 440 VOP_SCL_SET(vop, win, scale_yrgb_y, val); 441 442 VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4); 443 VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2); 444 445 VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode); 446 VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode); 447 VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL); 448 VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL); 449 VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode); 450 if (is_yuv) { 451 val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w, 452 dst_w, true, 0, NULL); 453 VOP_SCL_SET(vop, win, scale_cbcr_x, val); 454 val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h, 455 dst_h, false, vsu_mode, &vskiplines); 456 VOP_SCL_SET(vop, win, scale_cbcr_y, val); 457 458 VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4); 459 VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2); 460 VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode); 461 VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode); 462 VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL); 463 VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL); 464 VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode); 465 } 466 } 467 468 static void vop_dsp_hold_valid_irq_enable(struct vop *vop) 469 { 470 unsigned long flags; 471 472 if (WARN_ON(!vop->is_enabled)) 473 return; 474 475 spin_lock_irqsave(&vop->irq_lock, flags); 476 477 VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1); 478 VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1); 479 480 spin_unlock_irqrestore(&vop->irq_lock, flags); 481 } 482 483 static void vop_dsp_hold_valid_irq_disable(struct vop *vop) 484 { 485 unsigned long flags; 486 487 if (WARN_ON(!vop->is_enabled)) 488 return; 489 490 spin_lock_irqsave(&vop->irq_lock, flags); 491 492 VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0); 493 494 spin_unlock_irqrestore(&vop->irq_lock, flags); 495 } 496 497 /* 498 * (1) each frame starts at the start of the Vsync pulse which is signaled by 499 * the "FRAME_SYNC" interrupt. 500 * (2) the active data region of each frame ends at dsp_vact_end 501 * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num, 502 * to get "LINE_FLAG" interrupt at the end of the active on screen data. 503 * 504 * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end 505 * Interrupts 506 * LINE_FLAG -------------------------------+ 507 * FRAME_SYNC ----+ | 508 * | | 509 * v v 510 * | Vsync | Vbp | Vactive | Vfp | 511 * ^ ^ ^ ^ 512 * | | | | 513 * | | | | 514 * dsp_vs_end ------------+ | | | VOP_DSP_VTOTAL_VS_END 515 * dsp_vact_start --------------+ | | VOP_DSP_VACT_ST_END 516 * dsp_vact_end ----------------------------+ | VOP_DSP_VACT_ST_END 517 * dsp_total -------------------------------------+ VOP_DSP_VTOTAL_VS_END 518 */ 519 static bool vop_line_flag_irq_is_enabled(struct vop *vop) 520 { 521 uint32_t line_flag_irq; 522 unsigned long flags; 523 524 spin_lock_irqsave(&vop->irq_lock, flags); 525 526 line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR); 527 528 spin_unlock_irqrestore(&vop->irq_lock, flags); 529 530 return !!line_flag_irq; 531 } 532 533 static void vop_line_flag_irq_enable(struct vop *vop) 534 { 535 unsigned long flags; 536 537 if (WARN_ON(!vop->is_enabled)) 538 return; 539 540 spin_lock_irqsave(&vop->irq_lock, flags); 541 542 VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1); 543 VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1); 544 545 spin_unlock_irqrestore(&vop->irq_lock, flags); 546 } 547 548 static void vop_line_flag_irq_disable(struct vop *vop) 549 { 550 unsigned long flags; 551 552 if (WARN_ON(!vop->is_enabled)) 553 return; 554 555 spin_lock_irqsave(&vop->irq_lock, flags); 556 557 VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0); 558 559 spin_unlock_irqrestore(&vop->irq_lock, flags); 560 } 561 562 static int vop_core_clks_enable(struct vop *vop) 563 { 564 int ret; 565 566 ret = clk_enable(vop->hclk); 567 if (ret < 0) 568 return ret; 569 570 ret = clk_enable(vop->aclk); 571 if (ret < 0) 572 goto err_disable_hclk; 573 574 return 0; 575 576 err_disable_hclk: 577 clk_disable(vop->hclk); 578 return ret; 579 } 580 581 static void vop_core_clks_disable(struct vop *vop) 582 { 583 clk_disable(vop->aclk); 584 clk_disable(vop->hclk); 585 } 586 587 static void vop_win_disable(struct vop *vop, const struct vop_win *vop_win) 588 { 589 const struct vop_win_data *win = vop_win->data; 590 591 if (win->phy->scl && win->phy->scl->ext) { 592 VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE); 593 VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE); 594 VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE); 595 VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE); 596 } 597 598 VOP_WIN_SET(vop, win, enable, 0); 599 vop->win_enabled &= ~BIT(VOP_WIN_TO_INDEX(vop_win)); 600 } 601 602 static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) 603 { 604 struct vop *vop = to_vop(crtc); 605 int ret, i; 606 607 ret = pm_runtime_get_sync(vop->dev); 608 if (ret < 0) { 609 DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret); 610 return ret; 611 } 612 613 ret = vop_core_clks_enable(vop); 614 if (WARN_ON(ret < 0)) 615 goto err_put_pm_runtime; 616 617 ret = clk_enable(vop->dclk); 618 if (WARN_ON(ret < 0)) 619 goto err_disable_core; 620 621 /* 622 * Slave iommu shares power, irq and clock with vop. It was associated 623 * automatically with this master device via common driver code. 624 * Now that we have enabled the clock we attach it to the shared drm 625 * mapping. 626 */ 627 ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev); 628 if (ret) { 629 DRM_DEV_ERROR(vop->dev, 630 "failed to attach dma mapping, %d\n", ret); 631 goto err_disable_dclk; 632 } 633 634 spin_lock(&vop->reg_lock); 635 for (i = 0; i < vop->len; i += 4) 636 writel_relaxed(vop->regsbak[i / 4], vop->regs + i); 637 638 /* 639 * We need to make sure that all windows are disabled before we 640 * enable the crtc. Otherwise we might try to scan from a destroyed 641 * buffer later. 642 * 643 * In the case of enable-after-PSR, we don't need to worry about this 644 * case since the buffer is guaranteed to be valid and disabling the 645 * window will result in screen glitches on PSR exit. 646 */ 647 if (!old_state || !old_state->self_refresh_active) { 648 for (i = 0; i < vop->data->win_size; i++) { 649 struct vop_win *vop_win = &vop->win[i]; 650 651 vop_win_disable(vop, vop_win); 652 } 653 } 654 655 if (vop->data->afbc) { 656 struct rockchip_crtc_state *s; 657 /* 658 * Disable AFBC and forget there was a vop window with AFBC 659 */ 660 VOP_AFBC_SET(vop, enable, 0); 661 s = to_rockchip_crtc_state(crtc->state); 662 s->enable_afbc = false; 663 } 664 665 vop_cfg_done(vop); 666 667 spin_unlock(&vop->reg_lock); 668 669 /* 670 * At here, vop clock & iommu is enable, R/W vop regs would be safe. 671 */ 672 vop->is_enabled = true; 673 674 spin_lock(&vop->reg_lock); 675 676 VOP_REG_SET(vop, common, standby, 1); 677 678 spin_unlock(&vop->reg_lock); 679 680 drm_crtc_vblank_on(crtc); 681 682 return 0; 683 684 err_disable_dclk: 685 clk_disable(vop->dclk); 686 err_disable_core: 687 vop_core_clks_disable(vop); 688 err_put_pm_runtime: 689 pm_runtime_put_sync(vop->dev); 690 return ret; 691 } 692 693 static void rockchip_drm_set_win_enabled(struct drm_crtc *crtc, bool enabled) 694 { 695 struct vop *vop = to_vop(crtc); 696 int i; 697 698 spin_lock(&vop->reg_lock); 699 700 for (i = 0; i < vop->data->win_size; i++) { 701 struct vop_win *vop_win = &vop->win[i]; 702 const struct vop_win_data *win = vop_win->data; 703 704 VOP_WIN_SET(vop, win, enable, 705 enabled && (vop->win_enabled & BIT(i))); 706 } 707 vop_cfg_done(vop); 708 709 spin_unlock(&vop->reg_lock); 710 } 711 712 static void vop_crtc_atomic_disable(struct drm_crtc *crtc, 713 struct drm_atomic_state *state) 714 { 715 struct vop *vop = to_vop(crtc); 716 717 WARN_ON(vop->event); 718 719 if (crtc->state->self_refresh_active) 720 rockchip_drm_set_win_enabled(crtc, false); 721 722 mutex_lock(&vop->vop_lock); 723 724 drm_crtc_vblank_off(crtc); 725 726 if (crtc->state->self_refresh_active) 727 goto out; 728 729 /* 730 * Vop standby will take effect at end of current frame, 731 * if dsp hold valid irq happen, it means standby complete. 732 * 733 * we must wait standby complete when we want to disable aclk, 734 * if not, memory bus maybe dead. 735 */ 736 reinit_completion(&vop->dsp_hold_completion); 737 vop_dsp_hold_valid_irq_enable(vop); 738 739 spin_lock(&vop->reg_lock); 740 741 VOP_REG_SET(vop, common, standby, 1); 742 743 spin_unlock(&vop->reg_lock); 744 745 if (!wait_for_completion_timeout(&vop->dsp_hold_completion, 746 msecs_to_jiffies(200))) 747 WARN(1, "%s: timed out waiting for DSP hold", crtc->name); 748 749 vop_dsp_hold_valid_irq_disable(vop); 750 751 vop->is_enabled = false; 752 753 /* 754 * vop standby complete, so iommu detach is safe. 755 */ 756 rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev); 757 758 clk_disable(vop->dclk); 759 vop_core_clks_disable(vop); 760 pm_runtime_put(vop->dev); 761 762 out: 763 mutex_unlock(&vop->vop_lock); 764 765 if (crtc->state->event && !crtc->state->active) { 766 spin_lock_irq(&crtc->dev->event_lock); 767 drm_crtc_send_vblank_event(crtc, crtc->state->event); 768 spin_unlock_irq(&crtc->dev->event_lock); 769 770 crtc->state->event = NULL; 771 } 772 } 773 774 static void vop_plane_destroy(struct drm_plane *plane) 775 { 776 drm_plane_cleanup(plane); 777 } 778 779 static inline bool rockchip_afbc(u64 modifier) 780 { 781 return modifier == ROCKCHIP_AFBC_MOD; 782 } 783 784 static bool rockchip_mod_supported(struct drm_plane *plane, 785 u32 format, u64 modifier) 786 { 787 if (modifier == DRM_FORMAT_MOD_LINEAR) 788 return true; 789 790 if (!rockchip_afbc(modifier)) { 791 DRM_DEBUG_KMS("Unsupported format modifier 0x%llx\n", modifier); 792 793 return false; 794 } 795 796 return vop_convert_afbc_format(format) >= 0; 797 } 798 799 static int vop_plane_atomic_check(struct drm_plane *plane, 800 struct drm_atomic_state *state) 801 { 802 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, 803 plane); 804 struct drm_crtc *crtc = new_plane_state->crtc; 805 struct drm_crtc_state *crtc_state; 806 struct drm_framebuffer *fb = new_plane_state->fb; 807 struct vop_win *vop_win = to_vop_win(plane); 808 const struct vop_win_data *win = vop_win->data; 809 int ret; 810 int min_scale = win->phy->scl ? FRAC_16_16(1, 8) : 811 DRM_PLANE_NO_SCALING; 812 int max_scale = win->phy->scl ? FRAC_16_16(8, 1) : 813 DRM_PLANE_NO_SCALING; 814 815 if (!crtc || WARN_ON(!fb)) 816 return 0; 817 818 crtc_state = drm_atomic_get_existing_crtc_state(state, 819 crtc); 820 if (WARN_ON(!crtc_state)) 821 return -EINVAL; 822 823 ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, 824 min_scale, max_scale, 825 true, true); 826 if (ret) 827 return ret; 828 829 if (!new_plane_state->visible) 830 return 0; 831 832 ret = vop_convert_format(fb->format->format); 833 if (ret < 0) 834 return ret; 835 836 /* 837 * Src.x1 can be odd when do clip, but yuv plane start point 838 * need align with 2 pixel. 839 */ 840 if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) { 841 DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n"); 842 return -EINVAL; 843 } 844 845 if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) { 846 DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n"); 847 return -EINVAL; 848 } 849 850 if (rockchip_afbc(fb->modifier)) { 851 struct vop *vop = to_vop(crtc); 852 853 if (!vop->data->afbc) { 854 DRM_ERROR("vop does not support AFBC\n"); 855 return -EINVAL; 856 } 857 858 ret = vop_convert_afbc_format(fb->format->format); 859 if (ret < 0) 860 return ret; 861 862 if (new_plane_state->src.x1 || new_plane_state->src.y1) { 863 DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", 864 new_plane_state->src.x1, 865 new_plane_state->src.y1, fb->offsets[0]); 866 return -EINVAL; 867 } 868 869 if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) { 870 DRM_ERROR("No rotation support in AFBC, rotation=%d\n", 871 new_plane_state->rotation); 872 return -EINVAL; 873 } 874 } 875 876 return 0; 877 } 878 879 static void vop_plane_atomic_disable(struct drm_plane *plane, 880 struct drm_atomic_state *state) 881 { 882 struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, 883 plane); 884 struct vop_win *vop_win = to_vop_win(plane); 885 struct vop *vop = to_vop(old_state->crtc); 886 887 if (!old_state->crtc) 888 return; 889 890 spin_lock(&vop->reg_lock); 891 892 vop_win_disable(vop, vop_win); 893 894 spin_unlock(&vop->reg_lock); 895 } 896 897 static void vop_plane_atomic_update(struct drm_plane *plane, 898 struct drm_atomic_state *state) 899 { 900 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, 901 plane); 902 struct drm_crtc *crtc = new_state->crtc; 903 struct vop_win *vop_win = to_vop_win(plane); 904 const struct vop_win_data *win = vop_win->data; 905 const struct vop_win_yuv2yuv_data *win_yuv2yuv = vop_win->yuv2yuv_data; 906 struct vop *vop = to_vop(new_state->crtc); 907 struct drm_framebuffer *fb = new_state->fb; 908 unsigned int actual_w, actual_h; 909 unsigned int dsp_stx, dsp_sty; 910 uint32_t act_info, dsp_info, dsp_st; 911 struct drm_rect *src = &new_state->src; 912 struct drm_rect *dest = &new_state->dst; 913 struct drm_gem_object *obj, *uv_obj; 914 struct rockchip_gem_object *rk_obj, *rk_uv_obj; 915 unsigned long offset; 916 dma_addr_t dma_addr; 917 uint32_t val; 918 bool rb_swap, uv_swap; 919 int win_index = VOP_WIN_TO_INDEX(vop_win); 920 int format; 921 int is_yuv = fb->format->is_yuv; 922 int i; 923 924 /* 925 * can't update plane when vop is disabled. 926 */ 927 if (WARN_ON(!crtc)) 928 return; 929 930 if (WARN_ON(!vop->is_enabled)) 931 return; 932 933 if (!new_state->visible) { 934 vop_plane_atomic_disable(plane, state); 935 return; 936 } 937 938 obj = fb->obj[0]; 939 rk_obj = to_rockchip_obj(obj); 940 941 actual_w = drm_rect_width(src) >> 16; 942 actual_h = drm_rect_height(src) >> 16; 943 act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff); 944 945 dsp_info = (drm_rect_height(dest) - 1) << 16; 946 dsp_info |= (drm_rect_width(dest) - 1) & 0xffff; 947 948 dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start; 949 dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start; 950 dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff); 951 952 offset = (src->x1 >> 16) * fb->format->cpp[0]; 953 offset += (src->y1 >> 16) * fb->pitches[0]; 954 dma_addr = rk_obj->dma_addr + offset + fb->offsets[0]; 955 956 /* 957 * For y-mirroring we need to move address 958 * to the beginning of the last line. 959 */ 960 if (new_state->rotation & DRM_MODE_REFLECT_Y) 961 dma_addr += (actual_h - 1) * fb->pitches[0]; 962 963 format = vop_convert_format(fb->format->format); 964 965 spin_lock(&vop->reg_lock); 966 967 if (rockchip_afbc(fb->modifier)) { 968 int afbc_format = vop_convert_afbc_format(fb->format->format); 969 970 VOP_AFBC_SET(vop, format, afbc_format | AFBC_TILE_16x16); 971 VOP_AFBC_SET(vop, hreg_block_split, 0); 972 VOP_AFBC_SET(vop, win_sel, VOP_WIN_TO_INDEX(vop_win)); 973 VOP_AFBC_SET(vop, hdr_ptr, dma_addr); 974 VOP_AFBC_SET(vop, pic_size, act_info); 975 } 976 977 VOP_WIN_SET(vop, win, format, format); 978 VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4)); 979 VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); 980 VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, y2r_en, is_yuv); 981 VOP_WIN_SET(vop, win, y_mir_en, 982 (new_state->rotation & DRM_MODE_REFLECT_Y) ? 1 : 0); 983 VOP_WIN_SET(vop, win, x_mir_en, 984 (new_state->rotation & DRM_MODE_REFLECT_X) ? 1 : 0); 985 986 if (is_yuv) { 987 int hsub = fb->format->hsub; 988 int vsub = fb->format->vsub; 989 int bpp = fb->format->cpp[1]; 990 991 uv_obj = fb->obj[1]; 992 rk_uv_obj = to_rockchip_obj(uv_obj); 993 994 offset = (src->x1 >> 16) * bpp / hsub; 995 offset += (src->y1 >> 16) * fb->pitches[1] / vsub; 996 997 dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1]; 998 VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4)); 999 VOP_WIN_SET(vop, win, uv_mst, dma_addr); 1000 1001 for (i = 0; i < NUM_YUV2YUV_COEFFICIENTS; i++) { 1002 VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop, 1003 win_yuv2yuv, 1004 y2r_coefficients[i], 1005 bt601_yuv2rgb[i]); 1006 } 1007 1008 uv_swap = has_uv_swapped(fb->format->format); 1009 VOP_WIN_SET(vop, win, uv_swap, uv_swap); 1010 } 1011 1012 if (win->phy->scl) 1013 scl_vop_cal_scl_fac(vop, win, actual_w, actual_h, 1014 drm_rect_width(dest), drm_rect_height(dest), 1015 fb->format); 1016 1017 VOP_WIN_SET(vop, win, act_info, act_info); 1018 VOP_WIN_SET(vop, win, dsp_info, dsp_info); 1019 VOP_WIN_SET(vop, win, dsp_st, dsp_st); 1020 1021 rb_swap = has_rb_swapped(fb->format->format); 1022 VOP_WIN_SET(vop, win, rb_swap, rb_swap); 1023 1024 /* 1025 * Blending win0 with the background color doesn't seem to work 1026 * correctly. We only get the background color, no matter the contents 1027 * of the win0 framebuffer. However, blending pre-multiplied color 1028 * with the default opaque black default background color is a no-op, 1029 * so we can just disable blending to get the correct result. 1030 */ 1031 if (fb->format->has_alpha && win_index > 0) { 1032 VOP_WIN_SET(vop, win, dst_alpha_ctl, 1033 DST_FACTOR_M0(ALPHA_SRC_INVERSE)); 1034 val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) | 1035 SRC_ALPHA_M0(ALPHA_STRAIGHT) | 1036 SRC_BLEND_M0(ALPHA_PER_PIX) | 1037 SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) | 1038 SRC_FACTOR_M0(ALPHA_ONE); 1039 VOP_WIN_SET(vop, win, src_alpha_ctl, val); 1040 1041 VOP_WIN_SET(vop, win, alpha_pre_mul, ALPHA_SRC_PRE_MUL); 1042 VOP_WIN_SET(vop, win, alpha_mode, ALPHA_PER_PIX); 1043 VOP_WIN_SET(vop, win, alpha_en, 1); 1044 } else { 1045 VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0)); 1046 VOP_WIN_SET(vop, win, alpha_en, 0); 1047 } 1048 1049 VOP_WIN_SET(vop, win, enable, 1); 1050 vop->win_enabled |= BIT(win_index); 1051 spin_unlock(&vop->reg_lock); 1052 } 1053 1054 static int vop_plane_atomic_async_check(struct drm_plane *plane, 1055 struct drm_atomic_state *state) 1056 { 1057 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, 1058 plane); 1059 struct vop_win *vop_win = to_vop_win(plane); 1060 const struct vop_win_data *win = vop_win->data; 1061 int min_scale = win->phy->scl ? FRAC_16_16(1, 8) : 1062 DRM_PLANE_NO_SCALING; 1063 int max_scale = win->phy->scl ? FRAC_16_16(8, 1) : 1064 DRM_PLANE_NO_SCALING; 1065 struct drm_crtc_state *crtc_state; 1066 1067 if (plane != new_plane_state->crtc->cursor) 1068 return -EINVAL; 1069 1070 if (!plane->state) 1071 return -EINVAL; 1072 1073 if (!plane->state->fb) 1074 return -EINVAL; 1075 1076 if (state) 1077 crtc_state = drm_atomic_get_existing_crtc_state(state, 1078 new_plane_state->crtc); 1079 else /* Special case for asynchronous cursor updates. */ 1080 crtc_state = plane->crtc->state; 1081 1082 return drm_atomic_helper_check_plane_state(plane->state, crtc_state, 1083 min_scale, max_scale, 1084 true, true); 1085 } 1086 1087 static void vop_plane_atomic_async_update(struct drm_plane *plane, 1088 struct drm_atomic_state *state) 1089 { 1090 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, 1091 plane); 1092 struct vop *vop = to_vop(plane->state->crtc); 1093 struct drm_framebuffer *old_fb = plane->state->fb; 1094 1095 plane->state->crtc_x = new_state->crtc_x; 1096 plane->state->crtc_y = new_state->crtc_y; 1097 plane->state->crtc_h = new_state->crtc_h; 1098 plane->state->crtc_w = new_state->crtc_w; 1099 plane->state->src_x = new_state->src_x; 1100 plane->state->src_y = new_state->src_y; 1101 plane->state->src_h = new_state->src_h; 1102 plane->state->src_w = new_state->src_w; 1103 swap(plane->state->fb, new_state->fb); 1104 1105 if (vop->is_enabled) { 1106 vop_plane_atomic_update(plane, state); 1107 spin_lock(&vop->reg_lock); 1108 vop_cfg_done(vop); 1109 spin_unlock(&vop->reg_lock); 1110 1111 /* 1112 * A scanout can still be occurring, so we can't drop the 1113 * reference to the old framebuffer. To solve this we get a 1114 * reference to old_fb and set a worker to release it later. 1115 * FIXME: if we perform 500 async_update calls before the 1116 * vblank, then we can have 500 different framebuffers waiting 1117 * to be released. 1118 */ 1119 if (old_fb && plane->state->fb != old_fb) { 1120 drm_framebuffer_get(old_fb); 1121 WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0); 1122 drm_flip_work_queue(&vop->fb_unref_work, old_fb); 1123 set_bit(VOP_PENDING_FB_UNREF, &vop->pending); 1124 } 1125 } 1126 } 1127 1128 static const struct drm_plane_helper_funcs plane_helper_funcs = { 1129 .atomic_check = vop_plane_atomic_check, 1130 .atomic_update = vop_plane_atomic_update, 1131 .atomic_disable = vop_plane_atomic_disable, 1132 .atomic_async_check = vop_plane_atomic_async_check, 1133 .atomic_async_update = vop_plane_atomic_async_update, 1134 }; 1135 1136 static const struct drm_plane_funcs vop_plane_funcs = { 1137 .update_plane = drm_atomic_helper_update_plane, 1138 .disable_plane = drm_atomic_helper_disable_plane, 1139 .destroy = vop_plane_destroy, 1140 .reset = drm_atomic_helper_plane_reset, 1141 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 1142 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 1143 .format_mod_supported = rockchip_mod_supported, 1144 }; 1145 1146 static int vop_crtc_enable_vblank(struct drm_crtc *crtc) 1147 { 1148 struct vop *vop = to_vop(crtc); 1149 unsigned long flags; 1150 1151 if (WARN_ON(!vop->is_enabled)) 1152 return -EPERM; 1153 1154 spin_lock_irqsave(&vop->irq_lock, flags); 1155 1156 VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1); 1157 VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1); 1158 1159 spin_unlock_irqrestore(&vop->irq_lock, flags); 1160 1161 return 0; 1162 } 1163 1164 static void vop_crtc_disable_vblank(struct drm_crtc *crtc) 1165 { 1166 struct vop *vop = to_vop(crtc); 1167 unsigned long flags; 1168 1169 if (WARN_ON(!vop->is_enabled)) 1170 return; 1171 1172 spin_lock_irqsave(&vop->irq_lock, flags); 1173 1174 VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0); 1175 1176 spin_unlock_irqrestore(&vop->irq_lock, flags); 1177 } 1178 1179 static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, 1180 const struct drm_display_mode *mode, 1181 struct drm_display_mode *adjusted_mode) 1182 { 1183 struct vop *vop = to_vop(crtc); 1184 unsigned long rate; 1185 1186 /* 1187 * Clock craziness. 1188 * 1189 * Key points: 1190 * 1191 * - DRM works in in kHz. 1192 * - Clock framework works in Hz. 1193 * - Rockchip's clock driver picks the clock rate that is the 1194 * same _OR LOWER_ than the one requested. 1195 * 1196 * Action plan: 1197 * 1198 * 1. Try to set the exact rate first, and confirm the clock framework 1199 * can provide it. 1200 * 1201 * 2. If the clock framework cannot provide the exact rate, we should 1202 * add 999 Hz to the requested rate. That way if the clock we need 1203 * is 60000001 Hz (~60 MHz) and DRM tells us to make 60000 kHz then 1204 * the clock framework will actually give us the right clock. 1205 * 1206 * 3. Get the clock framework to round the rate for us to tell us 1207 * what it will actually make. 1208 * 1209 * 4. Store the rounded up rate so that we don't need to worry about 1210 * this in the actual clk_set_rate(). 1211 */ 1212 rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000); 1213 if (rate / 1000 != adjusted_mode->clock) 1214 rate = clk_round_rate(vop->dclk, 1215 adjusted_mode->clock * 1000 + 999); 1216 adjusted_mode->clock = DIV_ROUND_UP(rate, 1000); 1217 1218 return true; 1219 } 1220 1221 static bool vop_dsp_lut_is_enabled(struct vop *vop) 1222 { 1223 return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en); 1224 } 1225 1226 static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc) 1227 { 1228 struct drm_color_lut *lut = crtc->state->gamma_lut->data; 1229 unsigned int i; 1230 1231 for (i = 0; i < crtc->gamma_size; i++) { 1232 u32 word; 1233 1234 word = (drm_color_lut_extract(lut[i].red, 10) << 20) | 1235 (drm_color_lut_extract(lut[i].green, 10) << 10) | 1236 drm_color_lut_extract(lut[i].blue, 10); 1237 writel(word, vop->lut_regs + i * 4); 1238 } 1239 } 1240 1241 static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc, 1242 struct drm_crtc_state *old_state) 1243 { 1244 struct drm_crtc_state *state = crtc->state; 1245 unsigned int idle; 1246 int ret; 1247 1248 if (!vop->lut_regs) 1249 return; 1250 /* 1251 * To disable gamma (gamma_lut is null) or to write 1252 * an update to the LUT, clear dsp_lut_en. 1253 */ 1254 spin_lock(&vop->reg_lock); 1255 VOP_REG_SET(vop, common, dsp_lut_en, 0); 1256 vop_cfg_done(vop); 1257 spin_unlock(&vop->reg_lock); 1258 1259 /* 1260 * In order to write the LUT to the internal memory, 1261 * we need to first make sure the dsp_lut_en bit is cleared. 1262 */ 1263 ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop, 1264 idle, !idle, 5, 30 * 1000); 1265 if (ret) { 1266 DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n"); 1267 return; 1268 } 1269 1270 if (!state->gamma_lut) 1271 return; 1272 1273 spin_lock(&vop->reg_lock); 1274 vop_crtc_write_gamma_lut(vop, crtc); 1275 VOP_REG_SET(vop, common, dsp_lut_en, 1); 1276 vop_cfg_done(vop); 1277 spin_unlock(&vop->reg_lock); 1278 } 1279 1280 static void vop_crtc_atomic_begin(struct drm_crtc *crtc, 1281 struct drm_atomic_state *state) 1282 { 1283 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, 1284 crtc); 1285 struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, 1286 crtc); 1287 struct vop *vop = to_vop(crtc); 1288 1289 /* 1290 * Only update GAMMA if the 'active' flag is not changed, 1291 * otherwise it's updated by .atomic_enable. 1292 */ 1293 if (crtc_state->color_mgmt_changed && 1294 !crtc_state->active_changed) 1295 vop_crtc_gamma_set(vop, crtc, old_crtc_state); 1296 } 1297 1298 static void vop_crtc_atomic_enable(struct drm_crtc *crtc, 1299 struct drm_atomic_state *state) 1300 { 1301 struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, 1302 crtc); 1303 struct vop *vop = to_vop(crtc); 1304 const struct vop_data *vop_data = vop->data; 1305 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state); 1306 struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; 1307 u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start; 1308 u16 hdisplay = adjusted_mode->hdisplay; 1309 u16 htotal = adjusted_mode->htotal; 1310 u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start; 1311 u16 hact_end = hact_st + hdisplay; 1312 u16 vdisplay = adjusted_mode->vdisplay; 1313 u16 vtotal = adjusted_mode->vtotal; 1314 u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start; 1315 u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start; 1316 u16 vact_end = vact_st + vdisplay; 1317 uint32_t pin_pol, val; 1318 int dither_bpc = s->output_bpc ? s->output_bpc : 10; 1319 int ret; 1320 1321 if (old_state && old_state->self_refresh_active) { 1322 drm_crtc_vblank_on(crtc); 1323 rockchip_drm_set_win_enabled(crtc, true); 1324 return; 1325 } 1326 1327 /* 1328 * If we have a GAMMA LUT in the state, then let's make sure 1329 * it's updated. We might be coming out of suspend, 1330 * which means the LUT internal memory needs to be re-written. 1331 */ 1332 if (crtc->state->gamma_lut) 1333 vop_crtc_gamma_set(vop, crtc, old_state); 1334 1335 mutex_lock(&vop->vop_lock); 1336 1337 WARN_ON(vop->event); 1338 1339 ret = vop_enable(crtc, old_state); 1340 if (ret) { 1341 mutex_unlock(&vop->vop_lock); 1342 DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret); 1343 return; 1344 } 1345 pin_pol = (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ? 1346 BIT(HSYNC_POSITIVE) : 0; 1347 pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ? 1348 BIT(VSYNC_POSITIVE) : 0; 1349 VOP_REG_SET(vop, output, pin_pol, pin_pol); 1350 VOP_REG_SET(vop, output, mipi_dual_channel_en, 0); 1351 1352 switch (s->output_type) { 1353 case DRM_MODE_CONNECTOR_LVDS: 1354 VOP_REG_SET(vop, output, rgb_dclk_pol, 1); 1355 VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol); 1356 VOP_REG_SET(vop, output, rgb_en, 1); 1357 break; 1358 case DRM_MODE_CONNECTOR_eDP: 1359 VOP_REG_SET(vop, output, edp_dclk_pol, 1); 1360 VOP_REG_SET(vop, output, edp_pin_pol, pin_pol); 1361 VOP_REG_SET(vop, output, edp_en, 1); 1362 break; 1363 case DRM_MODE_CONNECTOR_HDMIA: 1364 VOP_REG_SET(vop, output, hdmi_dclk_pol, 1); 1365 VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol); 1366 VOP_REG_SET(vop, output, hdmi_en, 1); 1367 break; 1368 case DRM_MODE_CONNECTOR_DSI: 1369 VOP_REG_SET(vop, output, mipi_dclk_pol, 1); 1370 VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol); 1371 VOP_REG_SET(vop, output, mipi_en, 1); 1372 VOP_REG_SET(vop, output, mipi_dual_channel_en, 1373 !!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL)); 1374 break; 1375 case DRM_MODE_CONNECTOR_DisplayPort: 1376 VOP_REG_SET(vop, output, dp_dclk_pol, 0); 1377 VOP_REG_SET(vop, output, dp_pin_pol, pin_pol); 1378 VOP_REG_SET(vop, output, dp_en, 1); 1379 break; 1380 default: 1381 DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n", 1382 s->output_type); 1383 } 1384 1385 /* 1386 * if vop is not support RGB10 output, need force RGB10 to RGB888. 1387 */ 1388 if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && 1389 !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10)) 1390 s->output_mode = ROCKCHIP_OUT_MODE_P888; 1391 1392 if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && dither_bpc <= 8) 1393 VOP_REG_SET(vop, common, pre_dither_down, 1); 1394 else 1395 VOP_REG_SET(vop, common, pre_dither_down, 0); 1396 1397 if (dither_bpc == 6) { 1398 VOP_REG_SET(vop, common, dither_down_sel, DITHER_DOWN_ALLEGRO); 1399 VOP_REG_SET(vop, common, dither_down_mode, RGB888_TO_RGB666); 1400 VOP_REG_SET(vop, common, dither_down_en, 1); 1401 } else { 1402 VOP_REG_SET(vop, common, dither_down_en, 0); 1403 } 1404 1405 VOP_REG_SET(vop, common, out_mode, s->output_mode); 1406 1407 VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len); 1408 val = hact_st << 16; 1409 val |= hact_end; 1410 VOP_REG_SET(vop, modeset, hact_st_end, val); 1411 VOP_REG_SET(vop, modeset, hpost_st_end, val); 1412 1413 VOP_REG_SET(vop, modeset, vtotal_pw, (vtotal << 16) | vsync_len); 1414 val = vact_st << 16; 1415 val |= vact_end; 1416 VOP_REG_SET(vop, modeset, vact_st_end, val); 1417 VOP_REG_SET(vop, modeset, vpost_st_end, val); 1418 1419 VOP_REG_SET(vop, intr, line_flag_num[0], vact_end); 1420 1421 clk_set_rate(vop->dclk, adjusted_mode->clock * 1000); 1422 1423 VOP_REG_SET(vop, common, standby, 0); 1424 mutex_unlock(&vop->vop_lock); 1425 } 1426 1427 static bool vop_fs_irq_is_pending(struct vop *vop) 1428 { 1429 return VOP_INTR_GET_TYPE(vop, status, FS_INTR); 1430 } 1431 1432 static void vop_wait_for_irq_handler(struct vop *vop) 1433 { 1434 bool pending; 1435 int ret; 1436 1437 /* 1438 * Spin until frame start interrupt status bit goes low, which means 1439 * that interrupt handler was invoked and cleared it. The timeout of 1440 * 10 msecs is really too long, but it is just a safety measure if 1441 * something goes really wrong. The wait will only happen in the very 1442 * unlikely case of a vblank happening exactly at the same time and 1443 * shouldn't exceed microseconds range. 1444 */ 1445 ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending, 1446 !pending, 0, 10 * 1000); 1447 if (ret) 1448 DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n"); 1449 1450 synchronize_irq(vop->irq); 1451 } 1452 1453 static int vop_crtc_atomic_check(struct drm_crtc *crtc, 1454 struct drm_atomic_state *state) 1455 { 1456 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, 1457 crtc); 1458 struct vop *vop = to_vop(crtc); 1459 struct drm_plane *plane; 1460 struct drm_plane_state *plane_state; 1461 struct rockchip_crtc_state *s; 1462 int afbc_planes = 0; 1463 1464 if (vop->lut_regs && crtc_state->color_mgmt_changed && 1465 crtc_state->gamma_lut) { 1466 unsigned int len; 1467 1468 len = drm_color_lut_size(crtc_state->gamma_lut); 1469 if (len != crtc->gamma_size) { 1470 DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n", 1471 len, crtc->gamma_size); 1472 return -EINVAL; 1473 } 1474 } 1475 1476 drm_atomic_crtc_state_for_each_plane(plane, crtc_state) { 1477 plane_state = 1478 drm_atomic_get_plane_state(crtc_state->state, plane); 1479 if (IS_ERR(plane_state)) { 1480 DRM_DEBUG_KMS("Cannot get plane state for plane %s\n", 1481 plane->name); 1482 return PTR_ERR(plane_state); 1483 } 1484 1485 if (drm_is_afbc(plane_state->fb->modifier)) 1486 ++afbc_planes; 1487 } 1488 1489 if (afbc_planes > 1) { 1490 DRM_DEBUG_KMS("Invalid number of AFBC planes; got %d, expected at most 1\n", afbc_planes); 1491 return -EINVAL; 1492 } 1493 1494 s = to_rockchip_crtc_state(crtc_state); 1495 s->enable_afbc = afbc_planes > 0; 1496 1497 return 0; 1498 } 1499 1500 static void vop_crtc_atomic_flush(struct drm_crtc *crtc, 1501 struct drm_atomic_state *state) 1502 { 1503 struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, 1504 crtc); 1505 struct drm_atomic_state *old_state = old_crtc_state->state; 1506 struct drm_plane_state *old_plane_state, *new_plane_state; 1507 struct vop *vop = to_vop(crtc); 1508 struct drm_plane *plane; 1509 struct rockchip_crtc_state *s; 1510 int i; 1511 1512 if (WARN_ON(!vop->is_enabled)) 1513 return; 1514 1515 spin_lock(&vop->reg_lock); 1516 1517 /* Enable AFBC if there is some AFBC window, disable otherwise. */ 1518 s = to_rockchip_crtc_state(crtc->state); 1519 VOP_AFBC_SET(vop, enable, s->enable_afbc); 1520 vop_cfg_done(vop); 1521 1522 spin_unlock(&vop->reg_lock); 1523 1524 /* 1525 * There is a (rather unlikely) possiblity that a vblank interrupt 1526 * fired before we set the cfg_done bit. To avoid spuriously 1527 * signalling flip completion we need to wait for it to finish. 1528 */ 1529 vop_wait_for_irq_handler(vop); 1530 1531 spin_lock_irq(&crtc->dev->event_lock); 1532 if (crtc->state->event) { 1533 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 1534 WARN_ON(vop->event); 1535 1536 vop->event = crtc->state->event; 1537 crtc->state->event = NULL; 1538 } 1539 spin_unlock_irq(&crtc->dev->event_lock); 1540 1541 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, 1542 new_plane_state, i) { 1543 if (!old_plane_state->fb) 1544 continue; 1545 1546 if (old_plane_state->fb == new_plane_state->fb) 1547 continue; 1548 1549 drm_framebuffer_get(old_plane_state->fb); 1550 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 1551 drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb); 1552 set_bit(VOP_PENDING_FB_UNREF, &vop->pending); 1553 } 1554 } 1555 1556 static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { 1557 .mode_fixup = vop_crtc_mode_fixup, 1558 .atomic_check = vop_crtc_atomic_check, 1559 .atomic_begin = vop_crtc_atomic_begin, 1560 .atomic_flush = vop_crtc_atomic_flush, 1561 .atomic_enable = vop_crtc_atomic_enable, 1562 .atomic_disable = vop_crtc_atomic_disable, 1563 }; 1564 1565 static void vop_crtc_destroy(struct drm_crtc *crtc) 1566 { 1567 drm_crtc_cleanup(crtc); 1568 } 1569 1570 static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc) 1571 { 1572 struct rockchip_crtc_state *rockchip_state; 1573 1574 if (WARN_ON(!crtc->state)) 1575 return NULL; 1576 1577 rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL); 1578 if (!rockchip_state) 1579 return NULL; 1580 1581 __drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base); 1582 return &rockchip_state->base; 1583 } 1584 1585 static void vop_crtc_destroy_state(struct drm_crtc *crtc, 1586 struct drm_crtc_state *state) 1587 { 1588 struct rockchip_crtc_state *s = to_rockchip_crtc_state(state); 1589 1590 __drm_atomic_helper_crtc_destroy_state(&s->base); 1591 kfree(s); 1592 } 1593 1594 static void vop_crtc_reset(struct drm_crtc *crtc) 1595 { 1596 struct rockchip_crtc_state *crtc_state = 1597 kzalloc(sizeof(*crtc_state), GFP_KERNEL); 1598 1599 if (crtc->state) 1600 vop_crtc_destroy_state(crtc, crtc->state); 1601 1602 __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base); 1603 } 1604 1605 #ifdef CONFIG_DRM_ANALOGIX_DP 1606 static struct drm_connector *vop_get_edp_connector(struct vop *vop) 1607 { 1608 struct drm_connector *connector; 1609 struct drm_connector_list_iter conn_iter; 1610 1611 drm_connector_list_iter_begin(vop->drm_dev, &conn_iter); 1612 drm_for_each_connector_iter(connector, &conn_iter) { 1613 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1614 drm_connector_list_iter_end(&conn_iter); 1615 return connector; 1616 } 1617 } 1618 drm_connector_list_iter_end(&conn_iter); 1619 1620 return NULL; 1621 } 1622 1623 static int vop_crtc_set_crc_source(struct drm_crtc *crtc, 1624 const char *source_name) 1625 { 1626 struct vop *vop = to_vop(crtc); 1627 struct drm_connector *connector; 1628 int ret; 1629 1630 connector = vop_get_edp_connector(vop); 1631 if (!connector) 1632 return -EINVAL; 1633 1634 if (source_name && strcmp(source_name, "auto") == 0) 1635 ret = analogix_dp_start_crc(connector); 1636 else if (!source_name) 1637 ret = analogix_dp_stop_crc(connector); 1638 else 1639 ret = -EINVAL; 1640 1641 return ret; 1642 } 1643 1644 static int 1645 vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name, 1646 size_t *values_cnt) 1647 { 1648 if (source_name && strcmp(source_name, "auto") != 0) 1649 return -EINVAL; 1650 1651 *values_cnt = 3; 1652 return 0; 1653 } 1654 1655 #else 1656 static int vop_crtc_set_crc_source(struct drm_crtc *crtc, 1657 const char *source_name) 1658 { 1659 return -ENODEV; 1660 } 1661 1662 static int 1663 vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name, 1664 size_t *values_cnt) 1665 { 1666 return -ENODEV; 1667 } 1668 #endif 1669 1670 static const struct drm_crtc_funcs vop_crtc_funcs = { 1671 .set_config = drm_atomic_helper_set_config, 1672 .page_flip = drm_atomic_helper_page_flip, 1673 .destroy = vop_crtc_destroy, 1674 .reset = vop_crtc_reset, 1675 .atomic_duplicate_state = vop_crtc_duplicate_state, 1676 .atomic_destroy_state = vop_crtc_destroy_state, 1677 .enable_vblank = vop_crtc_enable_vblank, 1678 .disable_vblank = vop_crtc_disable_vblank, 1679 .set_crc_source = vop_crtc_set_crc_source, 1680 .verify_crc_source = vop_crtc_verify_crc_source, 1681 }; 1682 1683 static void vop_fb_unref_worker(struct drm_flip_work *work, void *val) 1684 { 1685 struct vop *vop = container_of(work, struct vop, fb_unref_work); 1686 struct drm_framebuffer *fb = val; 1687 1688 drm_crtc_vblank_put(&vop->crtc); 1689 drm_framebuffer_put(fb); 1690 } 1691 1692 static void vop_handle_vblank(struct vop *vop) 1693 { 1694 struct drm_device *drm = vop->drm_dev; 1695 struct drm_crtc *crtc = &vop->crtc; 1696 1697 spin_lock(&drm->event_lock); 1698 if (vop->event) { 1699 drm_crtc_send_vblank_event(crtc, vop->event); 1700 drm_crtc_vblank_put(crtc); 1701 vop->event = NULL; 1702 } 1703 spin_unlock(&drm->event_lock); 1704 1705 if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending)) 1706 drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq); 1707 } 1708 1709 static irqreturn_t vop_isr(int irq, void *data) 1710 { 1711 struct vop *vop = data; 1712 struct drm_crtc *crtc = &vop->crtc; 1713 uint32_t active_irqs; 1714 int ret = IRQ_NONE; 1715 1716 /* 1717 * The irq is shared with the iommu. If the runtime-pm state of the 1718 * vop-device is disabled the irq has to be targeted at the iommu. 1719 */ 1720 if (!pm_runtime_get_if_in_use(vop->dev)) 1721 return IRQ_NONE; 1722 1723 if (vop_core_clks_enable(vop)) { 1724 DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n"); 1725 goto out; 1726 } 1727 1728 /* 1729 * interrupt register has interrupt status, enable and clear bits, we 1730 * must hold irq_lock to avoid a race with enable/disable_vblank(). 1731 */ 1732 spin_lock(&vop->irq_lock); 1733 1734 active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK); 1735 /* Clear all active interrupt sources */ 1736 if (active_irqs) 1737 VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1); 1738 1739 spin_unlock(&vop->irq_lock); 1740 1741 /* This is expected for vop iommu irqs, since the irq is shared */ 1742 if (!active_irqs) 1743 goto out_disable; 1744 1745 if (active_irqs & DSP_HOLD_VALID_INTR) { 1746 complete(&vop->dsp_hold_completion); 1747 active_irqs &= ~DSP_HOLD_VALID_INTR; 1748 ret = IRQ_HANDLED; 1749 } 1750 1751 if (active_irqs & LINE_FLAG_INTR) { 1752 complete(&vop->line_flag_completion); 1753 active_irqs &= ~LINE_FLAG_INTR; 1754 ret = IRQ_HANDLED; 1755 } 1756 1757 if (active_irqs & FS_INTR) { 1758 drm_crtc_handle_vblank(crtc); 1759 vop_handle_vblank(vop); 1760 active_irqs &= ~FS_INTR; 1761 ret = IRQ_HANDLED; 1762 } 1763 1764 /* Unhandled irqs are spurious. */ 1765 if (active_irqs) 1766 DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n", 1767 active_irqs); 1768 1769 out_disable: 1770 vop_core_clks_disable(vop); 1771 out: 1772 pm_runtime_put(vop->dev); 1773 return ret; 1774 } 1775 1776 static void vop_plane_add_properties(struct drm_plane *plane, 1777 const struct vop_win_data *win_data) 1778 { 1779 unsigned int flags = 0; 1780 1781 flags |= VOP_WIN_HAS_REG(win_data, x_mir_en) ? DRM_MODE_REFLECT_X : 0; 1782 flags |= VOP_WIN_HAS_REG(win_data, y_mir_en) ? DRM_MODE_REFLECT_Y : 0; 1783 if (flags) 1784 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, 1785 DRM_MODE_ROTATE_0 | flags); 1786 } 1787 1788 static int vop_create_crtc(struct vop *vop) 1789 { 1790 const struct vop_data *vop_data = vop->data; 1791 struct device *dev = vop->dev; 1792 struct drm_device *drm_dev = vop->drm_dev; 1793 struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp; 1794 struct drm_crtc *crtc = &vop->crtc; 1795 struct device_node *port; 1796 int ret; 1797 int i; 1798 1799 /* 1800 * Create drm_plane for primary and cursor planes first, since we need 1801 * to pass them to drm_crtc_init_with_planes, which sets the 1802 * "possible_crtcs" to the newly initialized crtc. 1803 */ 1804 for (i = 0; i < vop_data->win_size; i++) { 1805 struct vop_win *vop_win = &vop->win[i]; 1806 const struct vop_win_data *win_data = vop_win->data; 1807 1808 if (win_data->type != DRM_PLANE_TYPE_PRIMARY && 1809 win_data->type != DRM_PLANE_TYPE_CURSOR) 1810 continue; 1811 1812 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base, 1813 0, &vop_plane_funcs, 1814 win_data->phy->data_formats, 1815 win_data->phy->nformats, 1816 win_data->phy->format_modifiers, 1817 win_data->type, NULL); 1818 if (ret) { 1819 DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n", 1820 ret); 1821 goto err_cleanup_planes; 1822 } 1823 1824 plane = &vop_win->base; 1825 drm_plane_helper_add(plane, &plane_helper_funcs); 1826 vop_plane_add_properties(plane, win_data); 1827 if (plane->type == DRM_PLANE_TYPE_PRIMARY) 1828 primary = plane; 1829 else if (plane->type == DRM_PLANE_TYPE_CURSOR) 1830 cursor = plane; 1831 } 1832 1833 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 1834 &vop_crtc_funcs, NULL); 1835 if (ret) 1836 goto err_cleanup_planes; 1837 1838 drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs); 1839 if (vop->lut_regs) { 1840 drm_mode_crtc_set_gamma_size(crtc, vop_data->lut_size); 1841 drm_crtc_enable_color_mgmt(crtc, 0, false, vop_data->lut_size); 1842 } 1843 1844 /* 1845 * Create drm_planes for overlay windows with possible_crtcs restricted 1846 * to the newly created crtc. 1847 */ 1848 for (i = 0; i < vop_data->win_size; i++) { 1849 struct vop_win *vop_win = &vop->win[i]; 1850 const struct vop_win_data *win_data = vop_win->data; 1851 unsigned long possible_crtcs = drm_crtc_mask(crtc); 1852 1853 if (win_data->type != DRM_PLANE_TYPE_OVERLAY) 1854 continue; 1855 1856 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base, 1857 possible_crtcs, 1858 &vop_plane_funcs, 1859 win_data->phy->data_formats, 1860 win_data->phy->nformats, 1861 win_data->phy->format_modifiers, 1862 win_data->type, NULL); 1863 if (ret) { 1864 DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n", 1865 ret); 1866 goto err_cleanup_crtc; 1867 } 1868 drm_plane_helper_add(&vop_win->base, &plane_helper_funcs); 1869 vop_plane_add_properties(&vop_win->base, win_data); 1870 } 1871 1872 port = of_get_child_by_name(dev->of_node, "port"); 1873 if (!port) { 1874 DRM_DEV_ERROR(vop->dev, "no port node found in %pOF\n", 1875 dev->of_node); 1876 ret = -ENOENT; 1877 goto err_cleanup_crtc; 1878 } 1879 1880 drm_flip_work_init(&vop->fb_unref_work, "fb_unref", 1881 vop_fb_unref_worker); 1882 1883 init_completion(&vop->dsp_hold_completion); 1884 init_completion(&vop->line_flag_completion); 1885 crtc->port = port; 1886 1887 ret = drm_self_refresh_helper_init(crtc); 1888 if (ret) 1889 DRM_DEV_DEBUG_KMS(vop->dev, 1890 "Failed to init %s with SR helpers %d, ignoring\n", 1891 crtc->name, ret); 1892 1893 return 0; 1894 1895 err_cleanup_crtc: 1896 drm_crtc_cleanup(crtc); 1897 err_cleanup_planes: 1898 list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list, 1899 head) 1900 drm_plane_cleanup(plane); 1901 return ret; 1902 } 1903 1904 static void vop_destroy_crtc(struct vop *vop) 1905 { 1906 struct drm_crtc *crtc = &vop->crtc; 1907 struct drm_device *drm_dev = vop->drm_dev; 1908 struct drm_plane *plane, *tmp; 1909 1910 drm_self_refresh_helper_cleanup(crtc); 1911 1912 of_node_put(crtc->port); 1913 1914 /* 1915 * We need to cleanup the planes now. Why? 1916 * 1917 * The planes are "&vop->win[i].base". That means the memory is 1918 * all part of the big "struct vop" chunk of memory. That memory 1919 * was devm allocated and associated with this component. We need to 1920 * free it ourselves before vop_unbind() finishes. 1921 */ 1922 list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list, 1923 head) 1924 vop_plane_destroy(plane); 1925 1926 /* 1927 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane() 1928 * references the CRTC. 1929 */ 1930 drm_crtc_cleanup(crtc); 1931 drm_flip_work_cleanup(&vop->fb_unref_work); 1932 } 1933 1934 static int vop_initial(struct vop *vop) 1935 { 1936 struct reset_control *ahb_rst; 1937 int i, ret; 1938 1939 vop->hclk = devm_clk_get(vop->dev, "hclk_vop"); 1940 if (IS_ERR(vop->hclk)) { 1941 DRM_DEV_ERROR(vop->dev, "failed to get hclk source\n"); 1942 return PTR_ERR(vop->hclk); 1943 } 1944 vop->aclk = devm_clk_get(vop->dev, "aclk_vop"); 1945 if (IS_ERR(vop->aclk)) { 1946 DRM_DEV_ERROR(vop->dev, "failed to get aclk source\n"); 1947 return PTR_ERR(vop->aclk); 1948 } 1949 vop->dclk = devm_clk_get(vop->dev, "dclk_vop"); 1950 if (IS_ERR(vop->dclk)) { 1951 DRM_DEV_ERROR(vop->dev, "failed to get dclk source\n"); 1952 return PTR_ERR(vop->dclk); 1953 } 1954 1955 ret = pm_runtime_get_sync(vop->dev); 1956 if (ret < 0) { 1957 DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret); 1958 return ret; 1959 } 1960 1961 ret = clk_prepare(vop->dclk); 1962 if (ret < 0) { 1963 DRM_DEV_ERROR(vop->dev, "failed to prepare dclk\n"); 1964 goto err_put_pm_runtime; 1965 } 1966 1967 /* Enable both the hclk and aclk to setup the vop */ 1968 ret = clk_prepare_enable(vop->hclk); 1969 if (ret < 0) { 1970 DRM_DEV_ERROR(vop->dev, "failed to prepare/enable hclk\n"); 1971 goto err_unprepare_dclk; 1972 } 1973 1974 ret = clk_prepare_enable(vop->aclk); 1975 if (ret < 0) { 1976 DRM_DEV_ERROR(vop->dev, "failed to prepare/enable aclk\n"); 1977 goto err_disable_hclk; 1978 } 1979 1980 /* 1981 * do hclk_reset, reset all vop registers. 1982 */ 1983 ahb_rst = devm_reset_control_get(vop->dev, "ahb"); 1984 if (IS_ERR(ahb_rst)) { 1985 DRM_DEV_ERROR(vop->dev, "failed to get ahb reset\n"); 1986 ret = PTR_ERR(ahb_rst); 1987 goto err_disable_aclk; 1988 } 1989 reset_control_assert(ahb_rst); 1990 usleep_range(10, 20); 1991 reset_control_deassert(ahb_rst); 1992 1993 VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1); 1994 VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0); 1995 1996 for (i = 0; i < vop->len; i += sizeof(u32)) 1997 vop->regsbak[i / 4] = readl_relaxed(vop->regs + i); 1998 1999 VOP_REG_SET(vop, misc, global_regdone_en, 1); 2000 VOP_REG_SET(vop, common, dsp_blank, 0); 2001 2002 for (i = 0; i < vop->data->win_size; i++) { 2003 struct vop_win *vop_win = &vop->win[i]; 2004 const struct vop_win_data *win = vop_win->data; 2005 int channel = i * 2 + 1; 2006 2007 VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel); 2008 vop_win_disable(vop, vop_win); 2009 VOP_WIN_SET(vop, win, gate, 1); 2010 } 2011 2012 vop_cfg_done(vop); 2013 2014 /* 2015 * do dclk_reset, let all config take affect. 2016 */ 2017 vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk"); 2018 if (IS_ERR(vop->dclk_rst)) { 2019 DRM_DEV_ERROR(vop->dev, "failed to get dclk reset\n"); 2020 ret = PTR_ERR(vop->dclk_rst); 2021 goto err_disable_aclk; 2022 } 2023 reset_control_assert(vop->dclk_rst); 2024 usleep_range(10, 20); 2025 reset_control_deassert(vop->dclk_rst); 2026 2027 clk_disable(vop->hclk); 2028 clk_disable(vop->aclk); 2029 2030 vop->is_enabled = false; 2031 2032 pm_runtime_put_sync(vop->dev); 2033 2034 return 0; 2035 2036 err_disable_aclk: 2037 clk_disable_unprepare(vop->aclk); 2038 err_disable_hclk: 2039 clk_disable_unprepare(vop->hclk); 2040 err_unprepare_dclk: 2041 clk_unprepare(vop->dclk); 2042 err_put_pm_runtime: 2043 pm_runtime_put_sync(vop->dev); 2044 return ret; 2045 } 2046 2047 /* 2048 * Initialize the vop->win array elements. 2049 */ 2050 static void vop_win_init(struct vop *vop) 2051 { 2052 const struct vop_data *vop_data = vop->data; 2053 unsigned int i; 2054 2055 for (i = 0; i < vop_data->win_size; i++) { 2056 struct vop_win *vop_win = &vop->win[i]; 2057 const struct vop_win_data *win_data = &vop_data->win[i]; 2058 2059 vop_win->data = win_data; 2060 vop_win->vop = vop; 2061 2062 if (vop_data->win_yuv2yuv) 2063 vop_win->yuv2yuv_data = &vop_data->win_yuv2yuv[i]; 2064 } 2065 } 2066 2067 /** 2068 * rockchip_drm_wait_vact_end 2069 * @crtc: CRTC to enable line flag 2070 * @mstimeout: millisecond for timeout 2071 * 2072 * Wait for vact_end line flag irq or timeout. 2073 * 2074 * Returns: 2075 * Zero on success, negative errno on failure. 2076 */ 2077 int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout) 2078 { 2079 struct vop *vop = to_vop(crtc); 2080 unsigned long jiffies_left; 2081 int ret = 0; 2082 2083 if (!crtc || !vop->is_enabled) 2084 return -ENODEV; 2085 2086 mutex_lock(&vop->vop_lock); 2087 if (mstimeout <= 0) { 2088 ret = -EINVAL; 2089 goto out; 2090 } 2091 2092 if (vop_line_flag_irq_is_enabled(vop)) { 2093 ret = -EBUSY; 2094 goto out; 2095 } 2096 2097 reinit_completion(&vop->line_flag_completion); 2098 vop_line_flag_irq_enable(vop); 2099 2100 jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion, 2101 msecs_to_jiffies(mstimeout)); 2102 vop_line_flag_irq_disable(vop); 2103 2104 if (jiffies_left == 0) { 2105 DRM_DEV_ERROR(vop->dev, "Timeout waiting for IRQ\n"); 2106 ret = -ETIMEDOUT; 2107 goto out; 2108 } 2109 2110 out: 2111 mutex_unlock(&vop->vop_lock); 2112 return ret; 2113 } 2114 EXPORT_SYMBOL(rockchip_drm_wait_vact_end); 2115 2116 static int vop_bind(struct device *dev, struct device *master, void *data) 2117 { 2118 struct platform_device *pdev = to_platform_device(dev); 2119 const struct vop_data *vop_data; 2120 struct drm_device *drm_dev = data; 2121 struct vop *vop; 2122 struct resource *res; 2123 int ret, irq; 2124 2125 vop_data = of_device_get_match_data(dev); 2126 if (!vop_data) 2127 return -ENODEV; 2128 2129 /* Allocate vop struct and its vop_win array */ 2130 vop = devm_kzalloc(dev, struct_size(vop, win, vop_data->win_size), 2131 GFP_KERNEL); 2132 if (!vop) 2133 return -ENOMEM; 2134 2135 vop->dev = dev; 2136 vop->data = vop_data; 2137 vop->drm_dev = drm_dev; 2138 dev_set_drvdata(dev, vop); 2139 2140 vop_win_init(vop); 2141 2142 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2143 vop->regs = devm_ioremap_resource(dev, res); 2144 if (IS_ERR(vop->regs)) 2145 return PTR_ERR(vop->regs); 2146 vop->len = resource_size(res); 2147 2148 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2149 if (res) { 2150 if (!vop_data->lut_size) { 2151 DRM_DEV_ERROR(dev, "no gamma LUT size defined\n"); 2152 return -EINVAL; 2153 } 2154 vop->lut_regs = devm_ioremap_resource(dev, res); 2155 if (IS_ERR(vop->lut_regs)) 2156 return PTR_ERR(vop->lut_regs); 2157 } 2158 2159 vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL); 2160 if (!vop->regsbak) 2161 return -ENOMEM; 2162 2163 irq = platform_get_irq(pdev, 0); 2164 if (irq < 0) { 2165 DRM_DEV_ERROR(dev, "cannot find irq for vop\n"); 2166 return irq; 2167 } 2168 vop->irq = (unsigned int)irq; 2169 2170 spin_lock_init(&vop->reg_lock); 2171 spin_lock_init(&vop->irq_lock); 2172 mutex_init(&vop->vop_lock); 2173 2174 ret = vop_create_crtc(vop); 2175 if (ret) 2176 return ret; 2177 2178 pm_runtime_enable(&pdev->dev); 2179 2180 ret = vop_initial(vop); 2181 if (ret < 0) { 2182 DRM_DEV_ERROR(&pdev->dev, 2183 "cannot initial vop dev - err %d\n", ret); 2184 goto err_disable_pm_runtime; 2185 } 2186 2187 ret = devm_request_irq(dev, vop->irq, vop_isr, 2188 IRQF_SHARED, dev_name(dev), vop); 2189 if (ret) 2190 goto err_disable_pm_runtime; 2191 2192 if (vop->data->feature & VOP_FEATURE_INTERNAL_RGB) { 2193 vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev); 2194 if (IS_ERR(vop->rgb)) { 2195 ret = PTR_ERR(vop->rgb); 2196 goto err_disable_pm_runtime; 2197 } 2198 } 2199 2200 rockchip_drm_dma_init_device(drm_dev, dev); 2201 2202 return 0; 2203 2204 err_disable_pm_runtime: 2205 pm_runtime_disable(&pdev->dev); 2206 vop_destroy_crtc(vop); 2207 return ret; 2208 } 2209 2210 static void vop_unbind(struct device *dev, struct device *master, void *data) 2211 { 2212 struct vop *vop = dev_get_drvdata(dev); 2213 2214 if (vop->rgb) 2215 rockchip_rgb_fini(vop->rgb); 2216 2217 pm_runtime_disable(dev); 2218 vop_destroy_crtc(vop); 2219 2220 clk_unprepare(vop->aclk); 2221 clk_unprepare(vop->hclk); 2222 clk_unprepare(vop->dclk); 2223 } 2224 2225 const struct component_ops vop_component_ops = { 2226 .bind = vop_bind, 2227 .unbind = vop_unbind, 2228 }; 2229 EXPORT_SYMBOL_GPL(vop_component_ops); 2230