1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 4 * Author:Mark Yao <mark.yao@rock-chips.com> 5 */ 6 7 #include <linux/clk.h> 8 #include <linux/component.h> 9 #include <linux/delay.h> 10 #include <linux/iopoll.h> 11 #include <linux/kernel.h> 12 #include <linux/module.h> 13 #include <linux/of.h> 14 #include <linux/of_device.h> 15 #include <linux/overflow.h> 16 #include <linux/platform_device.h> 17 #include <linux/pm_runtime.h> 18 #include <linux/reset.h> 19 20 #include <drm/drm.h> 21 #include <drm/drm_atomic.h> 22 #include <drm/drm_atomic_uapi.h> 23 #include <drm/drm_crtc.h> 24 #include <drm/drm_flip_work.h> 25 #include <drm/drm_fourcc.h> 26 #include <drm/drm_gem_framebuffer_helper.h> 27 #include <drm/drm_plane_helper.h> 28 #include <drm/drm_probe_helper.h> 29 #include <drm/drm_self_refresh_helper.h> 30 #include <drm/drm_vblank.h> 31 32 #ifdef CONFIG_DRM_ANALOGIX_DP 33 #include <drm/bridge/analogix_dp.h> 34 #endif 35 36 #include "rockchip_drm_drv.h" 37 #include "rockchip_drm_gem.h" 38 #include "rockchip_drm_fb.h" 39 #include "rockchip_drm_vop.h" 40 #include "rockchip_rgb.h" 41 42 #define VOP_SELF_REFRESH_ENTRY_DELAY_MS 100 43 44 #define VOP_WIN_SET(vop, win, name, v) \ 45 vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name) 46 #define VOP_SCL_SET(vop, win, name, v) \ 47 vop_reg_set(vop, &win->phy->scl->name, win->base, ~0, v, #name) 48 #define VOP_SCL_SET_EXT(vop, win, name, v) \ 49 vop_reg_set(vop, &win->phy->scl->ext->name, \ 50 win->base, ~0, v, #name) 51 52 #define VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, name, v) \ 53 do { \ 54 if (win_yuv2yuv && win_yuv2yuv->name.mask) \ 55 vop_reg_set(vop, &win_yuv2yuv->name, 0, ~0, v, #name); \ 56 } while (0) 57 58 #define VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop, win_yuv2yuv, name, v) \ 59 do { \ 60 if (win_yuv2yuv && win_yuv2yuv->phy->name.mask) \ 61 vop_reg_set(vop, &win_yuv2yuv->phy->name, win_yuv2yuv->base, ~0, v, #name); \ 62 } while (0) 63 64 #define VOP_INTR_SET_MASK(vop, name, mask, v) \ 65 vop_reg_set(vop, &vop->data->intr->name, 0, mask, v, #name) 66 67 #define VOP_REG_SET(vop, group, name, v) \ 68 vop_reg_set(vop, &vop->data->group->name, 0, ~0, v, #name) 69 70 #define VOP_INTR_SET_TYPE(vop, name, type, v) \ 71 do { \ 72 int i, reg = 0, mask = 0; \ 73 for (i = 0; i < vop->data->intr->nintrs; i++) { \ 74 if (vop->data->intr->intrs[i] & type) { \ 75 reg |= (v) << i; \ 76 mask |= 1 << i; \ 77 } \ 78 } \ 79 VOP_INTR_SET_MASK(vop, name, mask, reg); \ 80 } while (0) 81 #define VOP_INTR_GET_TYPE(vop, name, type) \ 82 vop_get_intr_type(vop, &vop->data->intr->name, type) 83 84 #define VOP_WIN_GET(vop, win, name) \ 85 vop_read_reg(vop, win->base, &win->phy->name) 86 87 #define VOP_WIN_HAS_REG(win, name) \ 88 (!!(win->phy->name.mask)) 89 90 #define VOP_WIN_GET_YRGBADDR(vop, win) \ 91 vop_readl(vop, win->base + win->phy->yrgb_mst.offset) 92 93 #define VOP_WIN_TO_INDEX(vop_win) \ 94 ((vop_win) - (vop_win)->vop->win) 95 96 #define to_vop(x) container_of(x, struct vop, crtc) 97 #define to_vop_win(x) container_of(x, struct vop_win, base) 98 99 /* 100 * The coefficients of the following matrix are all fixed points. 101 * The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets. 102 * They are all represented in two's complement. 103 */ 104 static const uint32_t bt601_yuv2rgb[] = { 105 0x4A8, 0x0, 0x662, 106 0x4A8, 0x1E6F, 0x1CBF, 107 0x4A8, 0x812, 0x0, 108 0x321168, 0x0877CF, 0x2EB127 109 }; 110 111 enum vop_pending { 112 VOP_PENDING_FB_UNREF, 113 }; 114 115 struct vop_win { 116 struct drm_plane base; 117 const struct vop_win_data *data; 118 const struct vop_win_yuv2yuv_data *yuv2yuv_data; 119 struct vop *vop; 120 }; 121 122 struct rockchip_rgb; 123 struct vop { 124 struct drm_crtc crtc; 125 struct device *dev; 126 struct drm_device *drm_dev; 127 bool is_enabled; 128 129 struct completion dsp_hold_completion; 130 unsigned int win_enabled; 131 132 /* protected by dev->event_lock */ 133 struct drm_pending_vblank_event *event; 134 135 struct drm_flip_work fb_unref_work; 136 unsigned long pending; 137 138 struct completion line_flag_completion; 139 140 const struct vop_data *data; 141 142 uint32_t *regsbak; 143 void __iomem *regs; 144 145 /* physical map length of vop register */ 146 uint32_t len; 147 148 /* one time only one process allowed to config the register */ 149 spinlock_t reg_lock; 150 /* lock vop irq reg */ 151 spinlock_t irq_lock; 152 /* protects crtc enable/disable */ 153 struct mutex vop_lock; 154 155 unsigned int irq; 156 157 /* vop AHP clk */ 158 struct clk *hclk; 159 /* vop dclk */ 160 struct clk *dclk; 161 /* vop share memory frequency */ 162 struct clk *aclk; 163 164 /* vop dclk reset */ 165 struct reset_control *dclk_rst; 166 167 /* optional internal rgb encoder */ 168 struct rockchip_rgb *rgb; 169 170 struct vop_win win[]; 171 }; 172 173 static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) 174 { 175 writel(v, vop->regs + offset); 176 vop->regsbak[offset >> 2] = v; 177 } 178 179 static inline uint32_t vop_readl(struct vop *vop, uint32_t offset) 180 { 181 return readl(vop->regs + offset); 182 } 183 184 static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base, 185 const struct vop_reg *reg) 186 { 187 return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask; 188 } 189 190 static void vop_reg_set(struct vop *vop, const struct vop_reg *reg, 191 uint32_t _offset, uint32_t _mask, uint32_t v, 192 const char *reg_name) 193 { 194 int offset, mask, shift; 195 196 if (!reg || !reg->mask) { 197 DRM_DEV_DEBUG(vop->dev, "Warning: not support %s\n", reg_name); 198 return; 199 } 200 201 offset = reg->offset + _offset; 202 mask = reg->mask & _mask; 203 shift = reg->shift; 204 205 if (reg->write_mask) { 206 v = ((v << shift) & 0xffff) | (mask << (shift + 16)); 207 } else { 208 uint32_t cached_val = vop->regsbak[offset >> 2]; 209 210 v = (cached_val & ~(mask << shift)) | ((v & mask) << shift); 211 vop->regsbak[offset >> 2] = v; 212 } 213 214 if (reg->relaxed) 215 writel_relaxed(v, vop->regs + offset); 216 else 217 writel(v, vop->regs + offset); 218 } 219 220 static inline uint32_t vop_get_intr_type(struct vop *vop, 221 const struct vop_reg *reg, int type) 222 { 223 uint32_t i, ret = 0; 224 uint32_t regs = vop_read_reg(vop, 0, reg); 225 226 for (i = 0; i < vop->data->intr->nintrs; i++) { 227 if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i)) 228 ret |= vop->data->intr->intrs[i]; 229 } 230 231 return ret; 232 } 233 234 static inline void vop_cfg_done(struct vop *vop) 235 { 236 VOP_REG_SET(vop, common, cfg_done, 1); 237 } 238 239 static bool has_rb_swapped(uint32_t format) 240 { 241 switch (format) { 242 case DRM_FORMAT_XBGR8888: 243 case DRM_FORMAT_ABGR8888: 244 case DRM_FORMAT_BGR888: 245 case DRM_FORMAT_BGR565: 246 return true; 247 default: 248 return false; 249 } 250 } 251 252 static enum vop_data_format vop_convert_format(uint32_t format) 253 { 254 switch (format) { 255 case DRM_FORMAT_XRGB8888: 256 case DRM_FORMAT_ARGB8888: 257 case DRM_FORMAT_XBGR8888: 258 case DRM_FORMAT_ABGR8888: 259 return VOP_FMT_ARGB8888; 260 case DRM_FORMAT_RGB888: 261 case DRM_FORMAT_BGR888: 262 return VOP_FMT_RGB888; 263 case DRM_FORMAT_RGB565: 264 case DRM_FORMAT_BGR565: 265 return VOP_FMT_RGB565; 266 case DRM_FORMAT_NV12: 267 return VOP_FMT_YUV420SP; 268 case DRM_FORMAT_NV16: 269 return VOP_FMT_YUV422SP; 270 case DRM_FORMAT_NV24: 271 return VOP_FMT_YUV444SP; 272 default: 273 DRM_ERROR("unsupported format[%08x]\n", format); 274 return -EINVAL; 275 } 276 } 277 278 static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src, 279 uint32_t dst, bool is_horizontal, 280 int vsu_mode, int *vskiplines) 281 { 282 uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT; 283 284 if (vskiplines) 285 *vskiplines = 0; 286 287 if (is_horizontal) { 288 if (mode == SCALE_UP) 289 val = GET_SCL_FT_BIC(src, dst); 290 else if (mode == SCALE_DOWN) 291 val = GET_SCL_FT_BILI_DN(src, dst); 292 } else { 293 if (mode == SCALE_UP) { 294 if (vsu_mode == SCALE_UP_BIL) 295 val = GET_SCL_FT_BILI_UP(src, dst); 296 else 297 val = GET_SCL_FT_BIC(src, dst); 298 } else if (mode == SCALE_DOWN) { 299 if (vskiplines) { 300 *vskiplines = scl_get_vskiplines(src, dst); 301 val = scl_get_bili_dn_vskip(src, dst, 302 *vskiplines); 303 } else { 304 val = GET_SCL_FT_BILI_DN(src, dst); 305 } 306 } 307 } 308 309 return val; 310 } 311 312 static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win, 313 uint32_t src_w, uint32_t src_h, uint32_t dst_w, 314 uint32_t dst_h, const struct drm_format_info *info) 315 { 316 uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode; 317 uint16_t cbcr_hor_scl_mode = SCALE_NONE; 318 uint16_t cbcr_ver_scl_mode = SCALE_NONE; 319 bool is_yuv = false; 320 uint16_t cbcr_src_w = src_w / info->hsub; 321 uint16_t cbcr_src_h = src_h / info->vsub; 322 uint16_t vsu_mode; 323 uint16_t lb_mode; 324 uint32_t val; 325 int vskiplines; 326 327 if (info->is_yuv) 328 is_yuv = true; 329 330 if (dst_w > 3840) { 331 DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n"); 332 return; 333 } 334 335 if (!win->phy->scl->ext) { 336 VOP_SCL_SET(vop, win, scale_yrgb_x, 337 scl_cal_scale2(src_w, dst_w)); 338 VOP_SCL_SET(vop, win, scale_yrgb_y, 339 scl_cal_scale2(src_h, dst_h)); 340 if (is_yuv) { 341 VOP_SCL_SET(vop, win, scale_cbcr_x, 342 scl_cal_scale2(cbcr_src_w, dst_w)); 343 VOP_SCL_SET(vop, win, scale_cbcr_y, 344 scl_cal_scale2(cbcr_src_h, dst_h)); 345 } 346 return; 347 } 348 349 yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w); 350 yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h); 351 352 if (is_yuv) { 353 cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w); 354 cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h); 355 if (cbcr_hor_scl_mode == SCALE_DOWN) 356 lb_mode = scl_vop_cal_lb_mode(dst_w, true); 357 else 358 lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true); 359 } else { 360 if (yrgb_hor_scl_mode == SCALE_DOWN) 361 lb_mode = scl_vop_cal_lb_mode(dst_w, false); 362 else 363 lb_mode = scl_vop_cal_lb_mode(src_w, false); 364 } 365 366 VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode); 367 if (lb_mode == LB_RGB_3840X2) { 368 if (yrgb_ver_scl_mode != SCALE_NONE) { 369 DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n"); 370 return; 371 } 372 if (cbcr_ver_scl_mode != SCALE_NONE) { 373 DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n"); 374 return; 375 } 376 vsu_mode = SCALE_UP_BIL; 377 } else if (lb_mode == LB_RGB_2560X4) { 378 vsu_mode = SCALE_UP_BIL; 379 } else { 380 vsu_mode = SCALE_UP_BIC; 381 } 382 383 val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w, 384 true, 0, NULL); 385 VOP_SCL_SET(vop, win, scale_yrgb_x, val); 386 val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h, 387 false, vsu_mode, &vskiplines); 388 VOP_SCL_SET(vop, win, scale_yrgb_y, val); 389 390 VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4); 391 VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2); 392 393 VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode); 394 VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode); 395 VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL); 396 VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL); 397 VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode); 398 if (is_yuv) { 399 val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w, 400 dst_w, true, 0, NULL); 401 VOP_SCL_SET(vop, win, scale_cbcr_x, val); 402 val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h, 403 dst_h, false, vsu_mode, &vskiplines); 404 VOP_SCL_SET(vop, win, scale_cbcr_y, val); 405 406 VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4); 407 VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2); 408 VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode); 409 VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode); 410 VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL); 411 VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL); 412 VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode); 413 } 414 } 415 416 static void vop_dsp_hold_valid_irq_enable(struct vop *vop) 417 { 418 unsigned long flags; 419 420 if (WARN_ON(!vop->is_enabled)) 421 return; 422 423 spin_lock_irqsave(&vop->irq_lock, flags); 424 425 VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1); 426 VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1); 427 428 spin_unlock_irqrestore(&vop->irq_lock, flags); 429 } 430 431 static void vop_dsp_hold_valid_irq_disable(struct vop *vop) 432 { 433 unsigned long flags; 434 435 if (WARN_ON(!vop->is_enabled)) 436 return; 437 438 spin_lock_irqsave(&vop->irq_lock, flags); 439 440 VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0); 441 442 spin_unlock_irqrestore(&vop->irq_lock, flags); 443 } 444 445 /* 446 * (1) each frame starts at the start of the Vsync pulse which is signaled by 447 * the "FRAME_SYNC" interrupt. 448 * (2) the active data region of each frame ends at dsp_vact_end 449 * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num, 450 * to get "LINE_FLAG" interrupt at the end of the active on screen data. 451 * 452 * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end 453 * Interrupts 454 * LINE_FLAG -------------------------------+ 455 * FRAME_SYNC ----+ | 456 * | | 457 * v v 458 * | Vsync | Vbp | Vactive | Vfp | 459 * ^ ^ ^ ^ 460 * | | | | 461 * | | | | 462 * dsp_vs_end ------------+ | | | VOP_DSP_VTOTAL_VS_END 463 * dsp_vact_start --------------+ | | VOP_DSP_VACT_ST_END 464 * dsp_vact_end ----------------------------+ | VOP_DSP_VACT_ST_END 465 * dsp_total -------------------------------------+ VOP_DSP_VTOTAL_VS_END 466 */ 467 static bool vop_line_flag_irq_is_enabled(struct vop *vop) 468 { 469 uint32_t line_flag_irq; 470 unsigned long flags; 471 472 spin_lock_irqsave(&vop->irq_lock, flags); 473 474 line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR); 475 476 spin_unlock_irqrestore(&vop->irq_lock, flags); 477 478 return !!line_flag_irq; 479 } 480 481 static void vop_line_flag_irq_enable(struct vop *vop) 482 { 483 unsigned long flags; 484 485 if (WARN_ON(!vop->is_enabled)) 486 return; 487 488 spin_lock_irqsave(&vop->irq_lock, flags); 489 490 VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1); 491 VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1); 492 493 spin_unlock_irqrestore(&vop->irq_lock, flags); 494 } 495 496 static void vop_line_flag_irq_disable(struct vop *vop) 497 { 498 unsigned long flags; 499 500 if (WARN_ON(!vop->is_enabled)) 501 return; 502 503 spin_lock_irqsave(&vop->irq_lock, flags); 504 505 VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0); 506 507 spin_unlock_irqrestore(&vop->irq_lock, flags); 508 } 509 510 static int vop_core_clks_enable(struct vop *vop) 511 { 512 int ret; 513 514 ret = clk_enable(vop->hclk); 515 if (ret < 0) 516 return ret; 517 518 ret = clk_enable(vop->aclk); 519 if (ret < 0) 520 goto err_disable_hclk; 521 522 return 0; 523 524 err_disable_hclk: 525 clk_disable(vop->hclk); 526 return ret; 527 } 528 529 static void vop_core_clks_disable(struct vop *vop) 530 { 531 clk_disable(vop->aclk); 532 clk_disable(vop->hclk); 533 } 534 535 static void vop_win_disable(struct vop *vop, const struct vop_win *vop_win) 536 { 537 const struct vop_win_data *win = vop_win->data; 538 539 if (win->phy->scl && win->phy->scl->ext) { 540 VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE); 541 VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE); 542 VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE); 543 VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE); 544 } 545 546 VOP_WIN_SET(vop, win, enable, 0); 547 vop->win_enabled &= ~BIT(VOP_WIN_TO_INDEX(vop_win)); 548 } 549 550 static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) 551 { 552 struct vop *vop = to_vop(crtc); 553 int ret, i; 554 555 ret = pm_runtime_get_sync(vop->dev); 556 if (ret < 0) { 557 DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret); 558 return ret; 559 } 560 561 ret = vop_core_clks_enable(vop); 562 if (WARN_ON(ret < 0)) 563 goto err_put_pm_runtime; 564 565 ret = clk_enable(vop->dclk); 566 if (WARN_ON(ret < 0)) 567 goto err_disable_core; 568 569 /* 570 * Slave iommu shares power, irq and clock with vop. It was associated 571 * automatically with this master device via common driver code. 572 * Now that we have enabled the clock we attach it to the shared drm 573 * mapping. 574 */ 575 ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev); 576 if (ret) { 577 DRM_DEV_ERROR(vop->dev, 578 "failed to attach dma mapping, %d\n", ret); 579 goto err_disable_dclk; 580 } 581 582 spin_lock(&vop->reg_lock); 583 for (i = 0; i < vop->len; i += 4) 584 writel_relaxed(vop->regsbak[i / 4], vop->regs + i); 585 586 /* 587 * We need to make sure that all windows are disabled before we 588 * enable the crtc. Otherwise we might try to scan from a destroyed 589 * buffer later. 590 * 591 * In the case of enable-after-PSR, we don't need to worry about this 592 * case since the buffer is guaranteed to be valid and disabling the 593 * window will result in screen glitches on PSR exit. 594 */ 595 if (!old_state || !old_state->self_refresh_active) { 596 for (i = 0; i < vop->data->win_size; i++) { 597 struct vop_win *vop_win = &vop->win[i]; 598 599 vop_win_disable(vop, vop_win); 600 } 601 } 602 spin_unlock(&vop->reg_lock); 603 604 vop_cfg_done(vop); 605 606 /* 607 * At here, vop clock & iommu is enable, R/W vop regs would be safe. 608 */ 609 vop->is_enabled = true; 610 611 spin_lock(&vop->reg_lock); 612 613 VOP_REG_SET(vop, common, standby, 1); 614 615 spin_unlock(&vop->reg_lock); 616 617 drm_crtc_vblank_on(crtc); 618 619 return 0; 620 621 err_disable_dclk: 622 clk_disable(vop->dclk); 623 err_disable_core: 624 vop_core_clks_disable(vop); 625 err_put_pm_runtime: 626 pm_runtime_put_sync(vop->dev); 627 return ret; 628 } 629 630 static void rockchip_drm_set_win_enabled(struct drm_crtc *crtc, bool enabled) 631 { 632 struct vop *vop = to_vop(crtc); 633 int i; 634 635 spin_lock(&vop->reg_lock); 636 637 for (i = 0; i < vop->data->win_size; i++) { 638 struct vop_win *vop_win = &vop->win[i]; 639 const struct vop_win_data *win = vop_win->data; 640 641 VOP_WIN_SET(vop, win, enable, 642 enabled && (vop->win_enabled & BIT(i))); 643 } 644 vop_cfg_done(vop); 645 646 spin_unlock(&vop->reg_lock); 647 } 648 649 static void vop_crtc_atomic_disable(struct drm_crtc *crtc, 650 struct drm_crtc_state *old_state) 651 { 652 struct vop *vop = to_vop(crtc); 653 654 WARN_ON(vop->event); 655 656 if (crtc->state->self_refresh_active) 657 rockchip_drm_set_win_enabled(crtc, false); 658 659 mutex_lock(&vop->vop_lock); 660 661 drm_crtc_vblank_off(crtc); 662 663 if (crtc->state->self_refresh_active) 664 goto out; 665 666 /* 667 * Vop standby will take effect at end of current frame, 668 * if dsp hold valid irq happen, it means standby complete. 669 * 670 * we must wait standby complete when we want to disable aclk, 671 * if not, memory bus maybe dead. 672 */ 673 reinit_completion(&vop->dsp_hold_completion); 674 vop_dsp_hold_valid_irq_enable(vop); 675 676 spin_lock(&vop->reg_lock); 677 678 VOP_REG_SET(vop, common, standby, 1); 679 680 spin_unlock(&vop->reg_lock); 681 682 wait_for_completion(&vop->dsp_hold_completion); 683 684 vop_dsp_hold_valid_irq_disable(vop); 685 686 vop->is_enabled = false; 687 688 /* 689 * vop standby complete, so iommu detach is safe. 690 */ 691 rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev); 692 693 clk_disable(vop->dclk); 694 vop_core_clks_disable(vop); 695 pm_runtime_put(vop->dev); 696 697 out: 698 mutex_unlock(&vop->vop_lock); 699 700 if (crtc->state->event && !crtc->state->active) { 701 spin_lock_irq(&crtc->dev->event_lock); 702 drm_crtc_send_vblank_event(crtc, crtc->state->event); 703 spin_unlock_irq(&crtc->dev->event_lock); 704 705 crtc->state->event = NULL; 706 } 707 } 708 709 static void vop_plane_destroy(struct drm_plane *plane) 710 { 711 drm_plane_cleanup(plane); 712 } 713 714 static int vop_plane_atomic_check(struct drm_plane *plane, 715 struct drm_plane_state *state) 716 { 717 struct drm_crtc *crtc = state->crtc; 718 struct drm_crtc_state *crtc_state; 719 struct drm_framebuffer *fb = state->fb; 720 struct vop_win *vop_win = to_vop_win(plane); 721 const struct vop_win_data *win = vop_win->data; 722 int ret; 723 int min_scale = win->phy->scl ? FRAC_16_16(1, 8) : 724 DRM_PLANE_HELPER_NO_SCALING; 725 int max_scale = win->phy->scl ? FRAC_16_16(8, 1) : 726 DRM_PLANE_HELPER_NO_SCALING; 727 728 if (!crtc || !fb) 729 return 0; 730 731 crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); 732 if (WARN_ON(!crtc_state)) 733 return -EINVAL; 734 735 ret = drm_atomic_helper_check_plane_state(state, crtc_state, 736 min_scale, max_scale, 737 true, true); 738 if (ret) 739 return ret; 740 741 if (!state->visible) 742 return 0; 743 744 ret = vop_convert_format(fb->format->format); 745 if (ret < 0) 746 return ret; 747 748 /* 749 * Src.x1 can be odd when do clip, but yuv plane start point 750 * need align with 2 pixel. 751 */ 752 if (fb->format->is_yuv && ((state->src.x1 >> 16) % 2)) { 753 DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n"); 754 return -EINVAL; 755 } 756 757 if (fb->format->is_yuv && state->rotation & DRM_MODE_REFLECT_Y) { 758 DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n"); 759 return -EINVAL; 760 } 761 762 return 0; 763 } 764 765 static void vop_plane_atomic_disable(struct drm_plane *plane, 766 struct drm_plane_state *old_state) 767 { 768 struct vop_win *vop_win = to_vop_win(plane); 769 struct vop *vop = to_vop(old_state->crtc); 770 771 if (!old_state->crtc) 772 return; 773 774 spin_lock(&vop->reg_lock); 775 776 vop_win_disable(vop, vop_win); 777 778 spin_unlock(&vop->reg_lock); 779 } 780 781 static void vop_plane_atomic_update(struct drm_plane *plane, 782 struct drm_plane_state *old_state) 783 { 784 struct drm_plane_state *state = plane->state; 785 struct drm_crtc *crtc = state->crtc; 786 struct vop_win *vop_win = to_vop_win(plane); 787 const struct vop_win_data *win = vop_win->data; 788 const struct vop_win_yuv2yuv_data *win_yuv2yuv = vop_win->yuv2yuv_data; 789 struct vop *vop = to_vop(state->crtc); 790 struct drm_framebuffer *fb = state->fb; 791 unsigned int actual_w, actual_h; 792 unsigned int dsp_stx, dsp_sty; 793 uint32_t act_info, dsp_info, dsp_st; 794 struct drm_rect *src = &state->src; 795 struct drm_rect *dest = &state->dst; 796 struct drm_gem_object *obj, *uv_obj; 797 struct rockchip_gem_object *rk_obj, *rk_uv_obj; 798 unsigned long offset; 799 dma_addr_t dma_addr; 800 uint32_t val; 801 bool rb_swap; 802 int win_index = VOP_WIN_TO_INDEX(vop_win); 803 int format; 804 int is_yuv = fb->format->is_yuv; 805 int i; 806 807 /* 808 * can't update plane when vop is disabled. 809 */ 810 if (WARN_ON(!crtc)) 811 return; 812 813 if (WARN_ON(!vop->is_enabled)) 814 return; 815 816 if (!state->visible) { 817 vop_plane_atomic_disable(plane, old_state); 818 return; 819 } 820 821 obj = fb->obj[0]; 822 rk_obj = to_rockchip_obj(obj); 823 824 actual_w = drm_rect_width(src) >> 16; 825 actual_h = drm_rect_height(src) >> 16; 826 act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff); 827 828 dsp_info = (drm_rect_height(dest) - 1) << 16; 829 dsp_info |= (drm_rect_width(dest) - 1) & 0xffff; 830 831 dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start; 832 dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start; 833 dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff); 834 835 offset = (src->x1 >> 16) * fb->format->cpp[0]; 836 offset += (src->y1 >> 16) * fb->pitches[0]; 837 dma_addr = rk_obj->dma_addr + offset + fb->offsets[0]; 838 839 /* 840 * For y-mirroring we need to move address 841 * to the beginning of the last line. 842 */ 843 if (state->rotation & DRM_MODE_REFLECT_Y) 844 dma_addr += (actual_h - 1) * fb->pitches[0]; 845 846 format = vop_convert_format(fb->format->format); 847 848 spin_lock(&vop->reg_lock); 849 850 VOP_WIN_SET(vop, win, format, format); 851 VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4)); 852 VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); 853 VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, y2r_en, is_yuv); 854 VOP_WIN_SET(vop, win, y_mir_en, 855 (state->rotation & DRM_MODE_REFLECT_Y) ? 1 : 0); 856 VOP_WIN_SET(vop, win, x_mir_en, 857 (state->rotation & DRM_MODE_REFLECT_X) ? 1 : 0); 858 859 if (is_yuv) { 860 int hsub = fb->format->hsub; 861 int vsub = fb->format->vsub; 862 int bpp = fb->format->cpp[1]; 863 864 uv_obj = fb->obj[1]; 865 rk_uv_obj = to_rockchip_obj(uv_obj); 866 867 offset = (src->x1 >> 16) * bpp / hsub; 868 offset += (src->y1 >> 16) * fb->pitches[1] / vsub; 869 870 dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1]; 871 VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4)); 872 VOP_WIN_SET(vop, win, uv_mst, dma_addr); 873 874 for (i = 0; i < NUM_YUV2YUV_COEFFICIENTS; i++) { 875 VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop, 876 win_yuv2yuv, 877 y2r_coefficients[i], 878 bt601_yuv2rgb[i]); 879 } 880 } 881 882 if (win->phy->scl) 883 scl_vop_cal_scl_fac(vop, win, actual_w, actual_h, 884 drm_rect_width(dest), drm_rect_height(dest), 885 fb->format); 886 887 VOP_WIN_SET(vop, win, act_info, act_info); 888 VOP_WIN_SET(vop, win, dsp_info, dsp_info); 889 VOP_WIN_SET(vop, win, dsp_st, dsp_st); 890 891 rb_swap = has_rb_swapped(fb->format->format); 892 VOP_WIN_SET(vop, win, rb_swap, rb_swap); 893 894 /* 895 * Blending win0 with the background color doesn't seem to work 896 * correctly. We only get the background color, no matter the contents 897 * of the win0 framebuffer. However, blending pre-multiplied color 898 * with the default opaque black default background color is a no-op, 899 * so we can just disable blending to get the correct result. 900 */ 901 if (fb->format->has_alpha && win_index > 0) { 902 VOP_WIN_SET(vop, win, dst_alpha_ctl, 903 DST_FACTOR_M0(ALPHA_SRC_INVERSE)); 904 val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) | 905 SRC_ALPHA_M0(ALPHA_STRAIGHT) | 906 SRC_BLEND_M0(ALPHA_PER_PIX) | 907 SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) | 908 SRC_FACTOR_M0(ALPHA_ONE); 909 VOP_WIN_SET(vop, win, src_alpha_ctl, val); 910 } else { 911 VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0)); 912 } 913 914 VOP_WIN_SET(vop, win, enable, 1); 915 vop->win_enabled |= BIT(win_index); 916 spin_unlock(&vop->reg_lock); 917 } 918 919 static int vop_plane_atomic_async_check(struct drm_plane *plane, 920 struct drm_plane_state *state) 921 { 922 struct vop_win *vop_win = to_vop_win(plane); 923 const struct vop_win_data *win = vop_win->data; 924 int min_scale = win->phy->scl ? FRAC_16_16(1, 8) : 925 DRM_PLANE_HELPER_NO_SCALING; 926 int max_scale = win->phy->scl ? FRAC_16_16(8, 1) : 927 DRM_PLANE_HELPER_NO_SCALING; 928 struct drm_crtc_state *crtc_state; 929 930 if (plane != state->crtc->cursor) 931 return -EINVAL; 932 933 if (!plane->state) 934 return -EINVAL; 935 936 if (!plane->state->fb) 937 return -EINVAL; 938 939 if (state->state) 940 crtc_state = drm_atomic_get_existing_crtc_state(state->state, 941 state->crtc); 942 else /* Special case for asynchronous cursor updates. */ 943 crtc_state = plane->crtc->state; 944 945 return drm_atomic_helper_check_plane_state(plane->state, crtc_state, 946 min_scale, max_scale, 947 true, true); 948 } 949 950 static void vop_plane_atomic_async_update(struct drm_plane *plane, 951 struct drm_plane_state *new_state) 952 { 953 struct vop *vop = to_vop(plane->state->crtc); 954 struct drm_framebuffer *old_fb = plane->state->fb; 955 956 plane->state->crtc_x = new_state->crtc_x; 957 plane->state->crtc_y = new_state->crtc_y; 958 plane->state->crtc_h = new_state->crtc_h; 959 plane->state->crtc_w = new_state->crtc_w; 960 plane->state->src_x = new_state->src_x; 961 plane->state->src_y = new_state->src_y; 962 plane->state->src_h = new_state->src_h; 963 plane->state->src_w = new_state->src_w; 964 swap(plane->state->fb, new_state->fb); 965 966 if (vop->is_enabled) { 967 vop_plane_atomic_update(plane, plane->state); 968 spin_lock(&vop->reg_lock); 969 vop_cfg_done(vop); 970 spin_unlock(&vop->reg_lock); 971 972 /* 973 * A scanout can still be occurring, so we can't drop the 974 * reference to the old framebuffer. To solve this we get a 975 * reference to old_fb and set a worker to release it later. 976 * FIXME: if we perform 500 async_update calls before the 977 * vblank, then we can have 500 different framebuffers waiting 978 * to be released. 979 */ 980 if (old_fb && plane->state->fb != old_fb) { 981 drm_framebuffer_get(old_fb); 982 WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0); 983 drm_flip_work_queue(&vop->fb_unref_work, old_fb); 984 set_bit(VOP_PENDING_FB_UNREF, &vop->pending); 985 } 986 } 987 } 988 989 static const struct drm_plane_helper_funcs plane_helper_funcs = { 990 .atomic_check = vop_plane_atomic_check, 991 .atomic_update = vop_plane_atomic_update, 992 .atomic_disable = vop_plane_atomic_disable, 993 .atomic_async_check = vop_plane_atomic_async_check, 994 .atomic_async_update = vop_plane_atomic_async_update, 995 .prepare_fb = drm_gem_fb_prepare_fb, 996 }; 997 998 static const struct drm_plane_funcs vop_plane_funcs = { 999 .update_plane = drm_atomic_helper_update_plane, 1000 .disable_plane = drm_atomic_helper_disable_plane, 1001 .destroy = vop_plane_destroy, 1002 .reset = drm_atomic_helper_plane_reset, 1003 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 1004 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 1005 }; 1006 1007 static int vop_crtc_enable_vblank(struct drm_crtc *crtc) 1008 { 1009 struct vop *vop = to_vop(crtc); 1010 unsigned long flags; 1011 1012 if (WARN_ON(!vop->is_enabled)) 1013 return -EPERM; 1014 1015 spin_lock_irqsave(&vop->irq_lock, flags); 1016 1017 VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1); 1018 VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1); 1019 1020 spin_unlock_irqrestore(&vop->irq_lock, flags); 1021 1022 return 0; 1023 } 1024 1025 static void vop_crtc_disable_vblank(struct drm_crtc *crtc) 1026 { 1027 struct vop *vop = to_vop(crtc); 1028 unsigned long flags; 1029 1030 if (WARN_ON(!vop->is_enabled)) 1031 return; 1032 1033 spin_lock_irqsave(&vop->irq_lock, flags); 1034 1035 VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0); 1036 1037 spin_unlock_irqrestore(&vop->irq_lock, flags); 1038 } 1039 1040 static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, 1041 const struct drm_display_mode *mode, 1042 struct drm_display_mode *adjusted_mode) 1043 { 1044 struct vop *vop = to_vop(crtc); 1045 1046 adjusted_mode->clock = 1047 DIV_ROUND_UP(clk_round_rate(vop->dclk, 1048 adjusted_mode->clock * 1000), 1000); 1049 1050 return true; 1051 } 1052 1053 static void vop_crtc_atomic_enable(struct drm_crtc *crtc, 1054 struct drm_crtc_state *old_state) 1055 { 1056 struct vop *vop = to_vop(crtc); 1057 const struct vop_data *vop_data = vop->data; 1058 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state); 1059 struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; 1060 u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start; 1061 u16 hdisplay = adjusted_mode->hdisplay; 1062 u16 htotal = adjusted_mode->htotal; 1063 u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start; 1064 u16 hact_end = hact_st + hdisplay; 1065 u16 vdisplay = adjusted_mode->vdisplay; 1066 u16 vtotal = adjusted_mode->vtotal; 1067 u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start; 1068 u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start; 1069 u16 vact_end = vact_st + vdisplay; 1070 uint32_t pin_pol, val; 1071 int dither_bpc = s->output_bpc ? s->output_bpc : 10; 1072 int ret; 1073 1074 if (old_state && old_state->self_refresh_active) { 1075 drm_crtc_vblank_on(crtc); 1076 rockchip_drm_set_win_enabled(crtc, true); 1077 return; 1078 } 1079 1080 mutex_lock(&vop->vop_lock); 1081 1082 WARN_ON(vop->event); 1083 1084 ret = vop_enable(crtc, old_state); 1085 if (ret) { 1086 mutex_unlock(&vop->vop_lock); 1087 DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret); 1088 return; 1089 } 1090 1091 pin_pol = BIT(DCLK_INVERT); 1092 pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ? 1093 BIT(HSYNC_POSITIVE) : 0; 1094 pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ? 1095 BIT(VSYNC_POSITIVE) : 0; 1096 VOP_REG_SET(vop, output, pin_pol, pin_pol); 1097 VOP_REG_SET(vop, output, mipi_dual_channel_en, 0); 1098 1099 switch (s->output_type) { 1100 case DRM_MODE_CONNECTOR_LVDS: 1101 VOP_REG_SET(vop, output, rgb_en, 1); 1102 VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol); 1103 break; 1104 case DRM_MODE_CONNECTOR_eDP: 1105 VOP_REG_SET(vop, output, edp_pin_pol, pin_pol); 1106 VOP_REG_SET(vop, output, edp_en, 1); 1107 break; 1108 case DRM_MODE_CONNECTOR_HDMIA: 1109 VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol); 1110 VOP_REG_SET(vop, output, hdmi_en, 1); 1111 break; 1112 case DRM_MODE_CONNECTOR_DSI: 1113 VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol); 1114 VOP_REG_SET(vop, output, mipi_en, 1); 1115 VOP_REG_SET(vop, output, mipi_dual_channel_en, 1116 !!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL)); 1117 break; 1118 case DRM_MODE_CONNECTOR_DisplayPort: 1119 pin_pol &= ~BIT(DCLK_INVERT); 1120 VOP_REG_SET(vop, output, dp_pin_pol, pin_pol); 1121 VOP_REG_SET(vop, output, dp_en, 1); 1122 break; 1123 default: 1124 DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n", 1125 s->output_type); 1126 } 1127 1128 /* 1129 * if vop is not support RGB10 output, need force RGB10 to RGB888. 1130 */ 1131 if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && 1132 !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10)) 1133 s->output_mode = ROCKCHIP_OUT_MODE_P888; 1134 1135 if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && dither_bpc <= 8) 1136 VOP_REG_SET(vop, common, pre_dither_down, 1); 1137 else 1138 VOP_REG_SET(vop, common, pre_dither_down, 0); 1139 1140 if (dither_bpc == 6) { 1141 VOP_REG_SET(vop, common, dither_down_sel, DITHER_DOWN_ALLEGRO); 1142 VOP_REG_SET(vop, common, dither_down_mode, RGB888_TO_RGB666); 1143 VOP_REG_SET(vop, common, dither_down_en, 1); 1144 } else { 1145 VOP_REG_SET(vop, common, dither_down_en, 0); 1146 } 1147 1148 VOP_REG_SET(vop, common, out_mode, s->output_mode); 1149 1150 VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len); 1151 val = hact_st << 16; 1152 val |= hact_end; 1153 VOP_REG_SET(vop, modeset, hact_st_end, val); 1154 VOP_REG_SET(vop, modeset, hpost_st_end, val); 1155 1156 VOP_REG_SET(vop, modeset, vtotal_pw, (vtotal << 16) | vsync_len); 1157 val = vact_st << 16; 1158 val |= vact_end; 1159 VOP_REG_SET(vop, modeset, vact_st_end, val); 1160 VOP_REG_SET(vop, modeset, vpost_st_end, val); 1161 1162 VOP_REG_SET(vop, intr, line_flag_num[0], vact_end); 1163 1164 clk_set_rate(vop->dclk, adjusted_mode->clock * 1000); 1165 1166 VOP_REG_SET(vop, common, standby, 0); 1167 mutex_unlock(&vop->vop_lock); 1168 } 1169 1170 static bool vop_fs_irq_is_pending(struct vop *vop) 1171 { 1172 return VOP_INTR_GET_TYPE(vop, status, FS_INTR); 1173 } 1174 1175 static void vop_wait_for_irq_handler(struct vop *vop) 1176 { 1177 bool pending; 1178 int ret; 1179 1180 /* 1181 * Spin until frame start interrupt status bit goes low, which means 1182 * that interrupt handler was invoked and cleared it. The timeout of 1183 * 10 msecs is really too long, but it is just a safety measure if 1184 * something goes really wrong. The wait will only happen in the very 1185 * unlikely case of a vblank happening exactly at the same time and 1186 * shouldn't exceed microseconds range. 1187 */ 1188 ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending, 1189 !pending, 0, 10 * 1000); 1190 if (ret) 1191 DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n"); 1192 1193 synchronize_irq(vop->irq); 1194 } 1195 1196 static void vop_crtc_atomic_flush(struct drm_crtc *crtc, 1197 struct drm_crtc_state *old_crtc_state) 1198 { 1199 struct drm_atomic_state *old_state = old_crtc_state->state; 1200 struct drm_plane_state *old_plane_state, *new_plane_state; 1201 struct vop *vop = to_vop(crtc); 1202 struct drm_plane *plane; 1203 int i; 1204 1205 if (WARN_ON(!vop->is_enabled)) 1206 return; 1207 1208 spin_lock(&vop->reg_lock); 1209 1210 vop_cfg_done(vop); 1211 1212 spin_unlock(&vop->reg_lock); 1213 1214 /* 1215 * There is a (rather unlikely) possiblity that a vblank interrupt 1216 * fired before we set the cfg_done bit. To avoid spuriously 1217 * signalling flip completion we need to wait for it to finish. 1218 */ 1219 vop_wait_for_irq_handler(vop); 1220 1221 spin_lock_irq(&crtc->dev->event_lock); 1222 if (crtc->state->event) { 1223 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 1224 WARN_ON(vop->event); 1225 1226 vop->event = crtc->state->event; 1227 crtc->state->event = NULL; 1228 } 1229 spin_unlock_irq(&crtc->dev->event_lock); 1230 1231 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, 1232 new_plane_state, i) { 1233 if (!old_plane_state->fb) 1234 continue; 1235 1236 if (old_plane_state->fb == new_plane_state->fb) 1237 continue; 1238 1239 drm_framebuffer_get(old_plane_state->fb); 1240 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 1241 drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb); 1242 set_bit(VOP_PENDING_FB_UNREF, &vop->pending); 1243 } 1244 } 1245 1246 static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { 1247 .mode_fixup = vop_crtc_mode_fixup, 1248 .atomic_flush = vop_crtc_atomic_flush, 1249 .atomic_enable = vop_crtc_atomic_enable, 1250 .atomic_disable = vop_crtc_atomic_disable, 1251 }; 1252 1253 static void vop_crtc_destroy(struct drm_crtc *crtc) 1254 { 1255 drm_crtc_cleanup(crtc); 1256 } 1257 1258 static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc) 1259 { 1260 struct rockchip_crtc_state *rockchip_state; 1261 1262 rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL); 1263 if (!rockchip_state) 1264 return NULL; 1265 1266 __drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base); 1267 return &rockchip_state->base; 1268 } 1269 1270 static void vop_crtc_destroy_state(struct drm_crtc *crtc, 1271 struct drm_crtc_state *state) 1272 { 1273 struct rockchip_crtc_state *s = to_rockchip_crtc_state(state); 1274 1275 __drm_atomic_helper_crtc_destroy_state(&s->base); 1276 kfree(s); 1277 } 1278 1279 static void vop_crtc_reset(struct drm_crtc *crtc) 1280 { 1281 struct rockchip_crtc_state *crtc_state = 1282 kzalloc(sizeof(*crtc_state), GFP_KERNEL); 1283 1284 if (crtc->state) 1285 vop_crtc_destroy_state(crtc, crtc->state); 1286 1287 __drm_atomic_helper_crtc_reset(crtc, &crtc_state->base); 1288 } 1289 1290 #ifdef CONFIG_DRM_ANALOGIX_DP 1291 static struct drm_connector *vop_get_edp_connector(struct vop *vop) 1292 { 1293 struct drm_connector *connector; 1294 struct drm_connector_list_iter conn_iter; 1295 1296 drm_connector_list_iter_begin(vop->drm_dev, &conn_iter); 1297 drm_for_each_connector_iter(connector, &conn_iter) { 1298 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1299 drm_connector_list_iter_end(&conn_iter); 1300 return connector; 1301 } 1302 } 1303 drm_connector_list_iter_end(&conn_iter); 1304 1305 return NULL; 1306 } 1307 1308 static int vop_crtc_set_crc_source(struct drm_crtc *crtc, 1309 const char *source_name) 1310 { 1311 struct vop *vop = to_vop(crtc); 1312 struct drm_connector *connector; 1313 int ret; 1314 1315 connector = vop_get_edp_connector(vop); 1316 if (!connector) 1317 return -EINVAL; 1318 1319 if (source_name && strcmp(source_name, "auto") == 0) 1320 ret = analogix_dp_start_crc(connector); 1321 else if (!source_name) 1322 ret = analogix_dp_stop_crc(connector); 1323 else 1324 ret = -EINVAL; 1325 1326 return ret; 1327 } 1328 1329 static int 1330 vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name, 1331 size_t *values_cnt) 1332 { 1333 if (source_name && strcmp(source_name, "auto") != 0) 1334 return -EINVAL; 1335 1336 *values_cnt = 3; 1337 return 0; 1338 } 1339 1340 #else 1341 static int vop_crtc_set_crc_source(struct drm_crtc *crtc, 1342 const char *source_name) 1343 { 1344 return -ENODEV; 1345 } 1346 1347 static int 1348 vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name, 1349 size_t *values_cnt) 1350 { 1351 return -ENODEV; 1352 } 1353 #endif 1354 1355 static const struct drm_crtc_funcs vop_crtc_funcs = { 1356 .set_config = drm_atomic_helper_set_config, 1357 .page_flip = drm_atomic_helper_page_flip, 1358 .destroy = vop_crtc_destroy, 1359 .reset = vop_crtc_reset, 1360 .atomic_duplicate_state = vop_crtc_duplicate_state, 1361 .atomic_destroy_state = vop_crtc_destroy_state, 1362 .enable_vblank = vop_crtc_enable_vblank, 1363 .disable_vblank = vop_crtc_disable_vblank, 1364 .set_crc_source = vop_crtc_set_crc_source, 1365 .verify_crc_source = vop_crtc_verify_crc_source, 1366 }; 1367 1368 static void vop_fb_unref_worker(struct drm_flip_work *work, void *val) 1369 { 1370 struct vop *vop = container_of(work, struct vop, fb_unref_work); 1371 struct drm_framebuffer *fb = val; 1372 1373 drm_crtc_vblank_put(&vop->crtc); 1374 drm_framebuffer_put(fb); 1375 } 1376 1377 static void vop_handle_vblank(struct vop *vop) 1378 { 1379 struct drm_device *drm = vop->drm_dev; 1380 struct drm_crtc *crtc = &vop->crtc; 1381 1382 spin_lock(&drm->event_lock); 1383 if (vop->event) { 1384 drm_crtc_send_vblank_event(crtc, vop->event); 1385 drm_crtc_vblank_put(crtc); 1386 vop->event = NULL; 1387 } 1388 spin_unlock(&drm->event_lock); 1389 1390 if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending)) 1391 drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq); 1392 } 1393 1394 static irqreturn_t vop_isr(int irq, void *data) 1395 { 1396 struct vop *vop = data; 1397 struct drm_crtc *crtc = &vop->crtc; 1398 uint32_t active_irqs; 1399 int ret = IRQ_NONE; 1400 1401 /* 1402 * The irq is shared with the iommu. If the runtime-pm state of the 1403 * vop-device is disabled the irq has to be targeted at the iommu. 1404 */ 1405 if (!pm_runtime_get_if_in_use(vop->dev)) 1406 return IRQ_NONE; 1407 1408 if (vop_core_clks_enable(vop)) { 1409 DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n"); 1410 goto out; 1411 } 1412 1413 /* 1414 * interrupt register has interrupt status, enable and clear bits, we 1415 * must hold irq_lock to avoid a race with enable/disable_vblank(). 1416 */ 1417 spin_lock(&vop->irq_lock); 1418 1419 active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK); 1420 /* Clear all active interrupt sources */ 1421 if (active_irqs) 1422 VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1); 1423 1424 spin_unlock(&vop->irq_lock); 1425 1426 /* This is expected for vop iommu irqs, since the irq is shared */ 1427 if (!active_irqs) 1428 goto out_disable; 1429 1430 if (active_irqs & DSP_HOLD_VALID_INTR) { 1431 complete(&vop->dsp_hold_completion); 1432 active_irqs &= ~DSP_HOLD_VALID_INTR; 1433 ret = IRQ_HANDLED; 1434 } 1435 1436 if (active_irqs & LINE_FLAG_INTR) { 1437 complete(&vop->line_flag_completion); 1438 active_irqs &= ~LINE_FLAG_INTR; 1439 ret = IRQ_HANDLED; 1440 } 1441 1442 if (active_irqs & FS_INTR) { 1443 drm_crtc_handle_vblank(crtc); 1444 vop_handle_vblank(vop); 1445 active_irqs &= ~FS_INTR; 1446 ret = IRQ_HANDLED; 1447 } 1448 1449 /* Unhandled irqs are spurious. */ 1450 if (active_irqs) 1451 DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n", 1452 active_irqs); 1453 1454 out_disable: 1455 vop_core_clks_disable(vop); 1456 out: 1457 pm_runtime_put(vop->dev); 1458 return ret; 1459 } 1460 1461 static void vop_plane_add_properties(struct drm_plane *plane, 1462 const struct vop_win_data *win_data) 1463 { 1464 unsigned int flags = 0; 1465 1466 flags |= VOP_WIN_HAS_REG(win_data, x_mir_en) ? DRM_MODE_REFLECT_X : 0; 1467 flags |= VOP_WIN_HAS_REG(win_data, y_mir_en) ? DRM_MODE_REFLECT_Y : 0; 1468 if (flags) 1469 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, 1470 DRM_MODE_ROTATE_0 | flags); 1471 } 1472 1473 static int vop_create_crtc(struct vop *vop) 1474 { 1475 const struct vop_data *vop_data = vop->data; 1476 struct device *dev = vop->dev; 1477 struct drm_device *drm_dev = vop->drm_dev; 1478 struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp; 1479 struct drm_crtc *crtc = &vop->crtc; 1480 struct device_node *port; 1481 int ret; 1482 int i; 1483 1484 /* 1485 * Create drm_plane for primary and cursor planes first, since we need 1486 * to pass them to drm_crtc_init_with_planes, which sets the 1487 * "possible_crtcs" to the newly initialized crtc. 1488 */ 1489 for (i = 0; i < vop_data->win_size; i++) { 1490 struct vop_win *vop_win = &vop->win[i]; 1491 const struct vop_win_data *win_data = vop_win->data; 1492 1493 if (win_data->type != DRM_PLANE_TYPE_PRIMARY && 1494 win_data->type != DRM_PLANE_TYPE_CURSOR) 1495 continue; 1496 1497 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base, 1498 0, &vop_plane_funcs, 1499 win_data->phy->data_formats, 1500 win_data->phy->nformats, 1501 NULL, win_data->type, NULL); 1502 if (ret) { 1503 DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n", 1504 ret); 1505 goto err_cleanup_planes; 1506 } 1507 1508 plane = &vop_win->base; 1509 drm_plane_helper_add(plane, &plane_helper_funcs); 1510 vop_plane_add_properties(plane, win_data); 1511 if (plane->type == DRM_PLANE_TYPE_PRIMARY) 1512 primary = plane; 1513 else if (plane->type == DRM_PLANE_TYPE_CURSOR) 1514 cursor = plane; 1515 } 1516 1517 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 1518 &vop_crtc_funcs, NULL); 1519 if (ret) 1520 goto err_cleanup_planes; 1521 1522 drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs); 1523 1524 /* 1525 * Create drm_planes for overlay windows with possible_crtcs restricted 1526 * to the newly created crtc. 1527 */ 1528 for (i = 0; i < vop_data->win_size; i++) { 1529 struct vop_win *vop_win = &vop->win[i]; 1530 const struct vop_win_data *win_data = vop_win->data; 1531 unsigned long possible_crtcs = drm_crtc_mask(crtc); 1532 1533 if (win_data->type != DRM_PLANE_TYPE_OVERLAY) 1534 continue; 1535 1536 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base, 1537 possible_crtcs, 1538 &vop_plane_funcs, 1539 win_data->phy->data_formats, 1540 win_data->phy->nformats, 1541 NULL, win_data->type, NULL); 1542 if (ret) { 1543 DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n", 1544 ret); 1545 goto err_cleanup_crtc; 1546 } 1547 drm_plane_helper_add(&vop_win->base, &plane_helper_funcs); 1548 vop_plane_add_properties(&vop_win->base, win_data); 1549 } 1550 1551 port = of_get_child_by_name(dev->of_node, "port"); 1552 if (!port) { 1553 DRM_DEV_ERROR(vop->dev, "no port node found in %pOF\n", 1554 dev->of_node); 1555 ret = -ENOENT; 1556 goto err_cleanup_crtc; 1557 } 1558 1559 drm_flip_work_init(&vop->fb_unref_work, "fb_unref", 1560 vop_fb_unref_worker); 1561 1562 init_completion(&vop->dsp_hold_completion); 1563 init_completion(&vop->line_flag_completion); 1564 crtc->port = port; 1565 1566 ret = drm_self_refresh_helper_init(crtc, 1567 VOP_SELF_REFRESH_ENTRY_DELAY_MS); 1568 if (ret) 1569 DRM_DEV_DEBUG_KMS(vop->dev, 1570 "Failed to init %s with SR helpers %d, ignoring\n", 1571 crtc->name, ret); 1572 1573 return 0; 1574 1575 err_cleanup_crtc: 1576 drm_crtc_cleanup(crtc); 1577 err_cleanup_planes: 1578 list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list, 1579 head) 1580 drm_plane_cleanup(plane); 1581 return ret; 1582 } 1583 1584 static void vop_destroy_crtc(struct vop *vop) 1585 { 1586 struct drm_crtc *crtc = &vop->crtc; 1587 struct drm_device *drm_dev = vop->drm_dev; 1588 struct drm_plane *plane, *tmp; 1589 1590 drm_self_refresh_helper_cleanup(crtc); 1591 1592 of_node_put(crtc->port); 1593 1594 /* 1595 * We need to cleanup the planes now. Why? 1596 * 1597 * The planes are "&vop->win[i].base". That means the memory is 1598 * all part of the big "struct vop" chunk of memory. That memory 1599 * was devm allocated and associated with this component. We need to 1600 * free it ourselves before vop_unbind() finishes. 1601 */ 1602 list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list, 1603 head) 1604 vop_plane_destroy(plane); 1605 1606 /* 1607 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane() 1608 * references the CRTC. 1609 */ 1610 drm_crtc_cleanup(crtc); 1611 drm_flip_work_cleanup(&vop->fb_unref_work); 1612 } 1613 1614 static int vop_initial(struct vop *vop) 1615 { 1616 struct reset_control *ahb_rst; 1617 int i, ret; 1618 1619 vop->hclk = devm_clk_get(vop->dev, "hclk_vop"); 1620 if (IS_ERR(vop->hclk)) { 1621 DRM_DEV_ERROR(vop->dev, "failed to get hclk source\n"); 1622 return PTR_ERR(vop->hclk); 1623 } 1624 vop->aclk = devm_clk_get(vop->dev, "aclk_vop"); 1625 if (IS_ERR(vop->aclk)) { 1626 DRM_DEV_ERROR(vop->dev, "failed to get aclk source\n"); 1627 return PTR_ERR(vop->aclk); 1628 } 1629 vop->dclk = devm_clk_get(vop->dev, "dclk_vop"); 1630 if (IS_ERR(vop->dclk)) { 1631 DRM_DEV_ERROR(vop->dev, "failed to get dclk source\n"); 1632 return PTR_ERR(vop->dclk); 1633 } 1634 1635 ret = pm_runtime_get_sync(vop->dev); 1636 if (ret < 0) { 1637 DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret); 1638 return ret; 1639 } 1640 1641 ret = clk_prepare(vop->dclk); 1642 if (ret < 0) { 1643 DRM_DEV_ERROR(vop->dev, "failed to prepare dclk\n"); 1644 goto err_put_pm_runtime; 1645 } 1646 1647 /* Enable both the hclk and aclk to setup the vop */ 1648 ret = clk_prepare_enable(vop->hclk); 1649 if (ret < 0) { 1650 DRM_DEV_ERROR(vop->dev, "failed to prepare/enable hclk\n"); 1651 goto err_unprepare_dclk; 1652 } 1653 1654 ret = clk_prepare_enable(vop->aclk); 1655 if (ret < 0) { 1656 DRM_DEV_ERROR(vop->dev, "failed to prepare/enable aclk\n"); 1657 goto err_disable_hclk; 1658 } 1659 1660 /* 1661 * do hclk_reset, reset all vop registers. 1662 */ 1663 ahb_rst = devm_reset_control_get(vop->dev, "ahb"); 1664 if (IS_ERR(ahb_rst)) { 1665 DRM_DEV_ERROR(vop->dev, "failed to get ahb reset\n"); 1666 ret = PTR_ERR(ahb_rst); 1667 goto err_disable_aclk; 1668 } 1669 reset_control_assert(ahb_rst); 1670 usleep_range(10, 20); 1671 reset_control_deassert(ahb_rst); 1672 1673 VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1); 1674 VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0); 1675 1676 for (i = 0; i < vop->len; i += sizeof(u32)) 1677 vop->regsbak[i / 4] = readl_relaxed(vop->regs + i); 1678 1679 VOP_REG_SET(vop, misc, global_regdone_en, 1); 1680 VOP_REG_SET(vop, common, dsp_blank, 0); 1681 1682 for (i = 0; i < vop->data->win_size; i++) { 1683 struct vop_win *vop_win = &vop->win[i]; 1684 const struct vop_win_data *win = vop_win->data; 1685 int channel = i * 2 + 1; 1686 1687 VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel); 1688 vop_win_disable(vop, vop_win); 1689 VOP_WIN_SET(vop, win, gate, 1); 1690 } 1691 1692 vop_cfg_done(vop); 1693 1694 /* 1695 * do dclk_reset, let all config take affect. 1696 */ 1697 vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk"); 1698 if (IS_ERR(vop->dclk_rst)) { 1699 DRM_DEV_ERROR(vop->dev, "failed to get dclk reset\n"); 1700 ret = PTR_ERR(vop->dclk_rst); 1701 goto err_disable_aclk; 1702 } 1703 reset_control_assert(vop->dclk_rst); 1704 usleep_range(10, 20); 1705 reset_control_deassert(vop->dclk_rst); 1706 1707 clk_disable(vop->hclk); 1708 clk_disable(vop->aclk); 1709 1710 vop->is_enabled = false; 1711 1712 pm_runtime_put_sync(vop->dev); 1713 1714 return 0; 1715 1716 err_disable_aclk: 1717 clk_disable_unprepare(vop->aclk); 1718 err_disable_hclk: 1719 clk_disable_unprepare(vop->hclk); 1720 err_unprepare_dclk: 1721 clk_unprepare(vop->dclk); 1722 err_put_pm_runtime: 1723 pm_runtime_put_sync(vop->dev); 1724 return ret; 1725 } 1726 1727 /* 1728 * Initialize the vop->win array elements. 1729 */ 1730 static void vop_win_init(struct vop *vop) 1731 { 1732 const struct vop_data *vop_data = vop->data; 1733 unsigned int i; 1734 1735 for (i = 0; i < vop_data->win_size; i++) { 1736 struct vop_win *vop_win = &vop->win[i]; 1737 const struct vop_win_data *win_data = &vop_data->win[i]; 1738 1739 vop_win->data = win_data; 1740 vop_win->vop = vop; 1741 1742 if (vop_data->win_yuv2yuv) 1743 vop_win->yuv2yuv_data = &vop_data->win_yuv2yuv[i]; 1744 } 1745 } 1746 1747 /** 1748 * rockchip_drm_wait_vact_end 1749 * @crtc: CRTC to enable line flag 1750 * @mstimeout: millisecond for timeout 1751 * 1752 * Wait for vact_end line flag irq or timeout. 1753 * 1754 * Returns: 1755 * Zero on success, negative errno on failure. 1756 */ 1757 int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout) 1758 { 1759 struct vop *vop = to_vop(crtc); 1760 unsigned long jiffies_left; 1761 int ret = 0; 1762 1763 if (!crtc || !vop->is_enabled) 1764 return -ENODEV; 1765 1766 mutex_lock(&vop->vop_lock); 1767 if (mstimeout <= 0) { 1768 ret = -EINVAL; 1769 goto out; 1770 } 1771 1772 if (vop_line_flag_irq_is_enabled(vop)) { 1773 ret = -EBUSY; 1774 goto out; 1775 } 1776 1777 reinit_completion(&vop->line_flag_completion); 1778 vop_line_flag_irq_enable(vop); 1779 1780 jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion, 1781 msecs_to_jiffies(mstimeout)); 1782 vop_line_flag_irq_disable(vop); 1783 1784 if (jiffies_left == 0) { 1785 DRM_DEV_ERROR(vop->dev, "Timeout waiting for IRQ\n"); 1786 ret = -ETIMEDOUT; 1787 goto out; 1788 } 1789 1790 out: 1791 mutex_unlock(&vop->vop_lock); 1792 return ret; 1793 } 1794 EXPORT_SYMBOL(rockchip_drm_wait_vact_end); 1795 1796 static int vop_bind(struct device *dev, struct device *master, void *data) 1797 { 1798 struct platform_device *pdev = to_platform_device(dev); 1799 const struct vop_data *vop_data; 1800 struct drm_device *drm_dev = data; 1801 struct vop *vop; 1802 struct resource *res; 1803 int ret, irq; 1804 1805 vop_data = of_device_get_match_data(dev); 1806 if (!vop_data) 1807 return -ENODEV; 1808 1809 /* Allocate vop struct and its vop_win array */ 1810 vop = devm_kzalloc(dev, struct_size(vop, win, vop_data->win_size), 1811 GFP_KERNEL); 1812 if (!vop) 1813 return -ENOMEM; 1814 1815 vop->dev = dev; 1816 vop->data = vop_data; 1817 vop->drm_dev = drm_dev; 1818 dev_set_drvdata(dev, vop); 1819 1820 vop_win_init(vop); 1821 1822 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1823 vop->len = resource_size(res); 1824 vop->regs = devm_ioremap_resource(dev, res); 1825 if (IS_ERR(vop->regs)) 1826 return PTR_ERR(vop->regs); 1827 1828 vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL); 1829 if (!vop->regsbak) 1830 return -ENOMEM; 1831 1832 irq = platform_get_irq(pdev, 0); 1833 if (irq < 0) { 1834 DRM_DEV_ERROR(dev, "cannot find irq for vop\n"); 1835 return irq; 1836 } 1837 vop->irq = (unsigned int)irq; 1838 1839 spin_lock_init(&vop->reg_lock); 1840 spin_lock_init(&vop->irq_lock); 1841 mutex_init(&vop->vop_lock); 1842 1843 ret = vop_create_crtc(vop); 1844 if (ret) 1845 return ret; 1846 1847 pm_runtime_enable(&pdev->dev); 1848 1849 ret = vop_initial(vop); 1850 if (ret < 0) { 1851 DRM_DEV_ERROR(&pdev->dev, 1852 "cannot initial vop dev - err %d\n", ret); 1853 goto err_disable_pm_runtime; 1854 } 1855 1856 ret = devm_request_irq(dev, vop->irq, vop_isr, 1857 IRQF_SHARED, dev_name(dev), vop); 1858 if (ret) 1859 goto err_disable_pm_runtime; 1860 1861 if (vop->data->feature & VOP_FEATURE_INTERNAL_RGB) { 1862 vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev); 1863 if (IS_ERR(vop->rgb)) { 1864 ret = PTR_ERR(vop->rgb); 1865 goto err_disable_pm_runtime; 1866 } 1867 } 1868 1869 return 0; 1870 1871 err_disable_pm_runtime: 1872 pm_runtime_disable(&pdev->dev); 1873 vop_destroy_crtc(vop); 1874 return ret; 1875 } 1876 1877 static void vop_unbind(struct device *dev, struct device *master, void *data) 1878 { 1879 struct vop *vop = dev_get_drvdata(dev); 1880 1881 if (vop->rgb) 1882 rockchip_rgb_fini(vop->rgb); 1883 1884 pm_runtime_disable(dev); 1885 vop_destroy_crtc(vop); 1886 1887 clk_unprepare(vop->aclk); 1888 clk_unprepare(vop->hclk); 1889 clk_unprepare(vop->dclk); 1890 } 1891 1892 const struct component_ops vop_component_ops = { 1893 .bind = vop_bind, 1894 .unbind = vop_unbind, 1895 }; 1896 EXPORT_SYMBOL_GPL(vop_component_ops); 1897