/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>
#ifdef CONFIG_DRM_ANALOGIX_DP
#include <drm/bridge/analogix_dp.h>
#endif

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>

#include <linux/reset.h>
#include <linux/delay.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"

/*
 * Register write helpers. RELAXED variants use writel_relaxed() (no memory
 * barrier) and are used for the per-window registers that only latch on
 * cfg_done; NORMAL variants use writel() for control/interrupt registers.
 */
#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
		vop_mask_write(x, off, mask, shift, v, write_mask, true)

#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
		vop_mask_write(x, off, mask, shift, v, write_mask, false)

#define REG_SET(x, base, reg, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, \
				 reg.mask, reg.shift, v, reg.write_mask)
#define REG_SET_MASK(x, base, reg, mask, v, mode) \
		__REG_SET_##mode(x, base + reg.offset, \
				 mask, reg.shift, v, reg.write_mask)

#define VOP_WIN_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->name, \
v, RELAXED)
#define VOP_SCL_SET(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
#define VOP_SCL_SET_EXT(x, win, name, v) \
		REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
#define VOP_CTRL_SET(x, name, v) \
		REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)

#define VOP_INTR_GET(vop, name) \
		vop_read_reg(vop, 0, &vop->data->ctrl->name)

#define VOP_INTR_SET(vop, name, mask, v) \
		REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
/* Apply value v to every interrupt bit whose type matches "type". */
#define VOP_INTR_SET_TYPE(vop, name, type, v) \
	do { \
		int i, reg = 0, mask = 0; \
		for (i = 0; i < vop->data->intr->nintrs; i++) { \
			if (vop->data->intr->intrs[i] & type) { \
				reg |= (v) << i; \
				mask |= 1 << i; \
			} \
		} \
		VOP_INTR_SET(vop, name, mask, reg); \
	} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
		vop_get_intr_type(vop, &vop->data->intr->name, type)

#define VOP_WIN_GET(x, win, name) \
		vop_read_reg(x, win->base, &win->phy->name)

#define VOP_WIN_GET_YRGBADDR(vop, win) \
		vop_readl(vop, win->base + win->phy->yrgb_mst.offset)

#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)

/* Deferred-work flags stored in vop->pending. */
enum vop_pending {
	VOP_PENDING_FB_UNREF,
};

/* One hardware overlay window, exposed to DRM as a plane. */
struct vop_win {
	struct drm_plane base;
	const struct vop_win_data *data;
	struct vop *vop;
};

struct vop {
	struct drm_crtc crtc;
	struct device *dev;
	struct drm_device *drm_dev;
	bool is_enabled;

	/* mutex vsync_ work */
	struct mutex vsync_mutex;
	bool vsync_work_pending;
	struct completion dsp_hold_completion;

	/* protected by dev->event_lock */
	struct drm_pending_vblank_event *event;

	/* deferred framebuffer unreference, committed from the vblank path */
	struct drm_flip_work fb_unref_work;
	unsigned long pending;

	struct completion line_flag_completion;

	/* per-SoC register layout description */
	const struct vop_data *data;

	/* shadow copy of the register file, see vop_mask_write() */
	uint32_t *regsbak;
	void __iomem *regs;

	/* physical map length of vop register */
	uint32_t len;

	/* one time only one process allowed to config the register */
	spinlock_t reg_lock;
	/* lock vop irq reg */
	spinlock_t irq_lock;

	unsigned int irq;

	/* vop AHP clk */
	struct clk *hclk;
	/* vop dclk */
	struct clk *dclk;
	/* vop share memory frequency */
	struct clk *aclk;

	/* vop dclk reset */
	struct reset_control *dclk_rst;

	struct vop_win win[];
};

/* Write a register and mirror the new value into the shadow copy. */
static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
{
	writel(v, vop->regs + offset);
	vop->regsbak[offset >> 2] = v;
}

static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
	return readl(vop->regs + offset);
}

/* Read a bitfield described by @reg out of the live register file. */
static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
				    const struct vop_reg *reg)
{
	return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}

/*
 * Read-modify-write of one bitfield. For write_mask-style registers the
 * hardware takes the mask in the high half-word, so no shadow is needed;
 * otherwise the unmodified bits come from the regsbak shadow (the hardware
 * registers are not reliably readable), which is updated alongside.
 */
static inline void vop_mask_write(struct vop *vop, uint32_t offset,
				  uint32_t mask, uint32_t shift, uint32_t v,
				  bool write_mask, bool relaxed)
{
	if (!mask)
		return;

	if (write_mask) {
		v = ((v << shift) & 0xffff) | (mask << (shift + 16));
	} else {
		uint32_t cached_val = vop->regsbak[offset >> 2];

		v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
		vop->regsbak[offset >> 2] = v;
	}

	if (relaxed)
		writel_relaxed(v, vop->regs + offset);
	else
		writel(v, vop->regs + offset);
}

/* Translate raw interrupt register bits into VOP_INTR_* type flags. */
static inline uint32_t vop_get_intr_type(struct vop *vop,
					 const struct vop_reg *reg, int type)
{
	uint32_t i, ret = 0;
	uint32_t regs = vop_read_reg(vop, 0, reg);

	for (i = 0; i < vop->data->intr->nintrs; i++) {
		if ((type & vop->data->intr->intrs[i]) && (regs & 1 << i))
			ret |= vop->data->intr->intrs[i];
	}

	return ret;
}

/* Latch all pending register writes into the hardware at next frame. */
static inline void vop_cfg_done(struct vop *vop)
{
	VOP_CTRL_SET(vop, cfg_done, 1);
}

static
bool has_rb_swapped(uint32_t format)
{
	/* BGR-ordered formats need the hardware red/blue swap enabled. */
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;
	default:
		return false;
	}
}

/*
 * Map a DRM fourcc onto the VOP hardware format field.
 * Returns a negative errno for formats the hardware cannot scan out.
 */
static enum vop_data_format vop_convert_format(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return VOP_FMT_ARGB8888;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return VOP_FMT_RGB888;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return VOP_FMT_RGB565;
	case DRM_FORMAT_NV12:
		return VOP_FMT_YUV420SP;
	case DRM_FORMAT_NV16:
		return VOP_FMT_YUV422SP;
	case DRM_FORMAT_NV24:
		return VOP_FMT_YUV444SP;
	default:
		DRM_ERROR("unsupported format[%08x]\n", format);
		return -EINVAL;
	}
}

/* Only the semi-planar NV12/NV16/NV24 YUV formats are supported. */
static bool is_yuv_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV24:
		return true;
	default:
		return false;
	}
}

static bool is_alpha_support(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		return true;
	default:
		return false;
	}
}

/*
 * Compute one scale-factor register value (16.SCL_FT fixed point).
 * For vertical scale-down, *vskiplines (when provided) receives the
 * number of source lines to skip and the factor accounts for it.
 */
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
				  uint32_t dst, bool is_horizontal,
				  int vsu_mode, int *vskiplines)
{
	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;

	if (is_horizontal) {
		if (mode == SCALE_UP)
			val = GET_SCL_FT_BIC(src, dst);
		else if (mode == SCALE_DOWN)
			val = GET_SCL_FT_BILI_DN(src, dst);
	} else {
		if (mode == SCALE_UP) {
			if (vsu_mode == SCALE_UP_BIL)
				val = GET_SCL_FT_BILI_UP(src, dst);
			else
				val = GET_SCL_FT_BIC(src, dst);
		} else if (mode == SCALE_DOWN) {
			if (vskiplines) {
				*vskiplines = scl_get_vskiplines(src, dst);
				val = scl_get_bili_dn_vskip(src, dst,
							    *vskiplines);
			} else {
				val = GET_SCL_FT_BILI_DN(src, dst);
			}
		}
	}

	return val;
}

/*
 * Program the full scaler configuration (modes, factors, line-buffer mode)
 * for one window, covering both the Y/RGB and (for YUV) the CbCr planes.
 */
static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
				uint32_t src_w, uint32_t src_h, uint32_t dst_w,
				uint32_t dst_h, uint32_t pixel_format)
{
	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
	bool is_yuv = is_yuv_support(pixel_format);
	uint16_t cbcr_src_w = src_w / hsub;
	uint16_t cbcr_src_h = src_h / vsub;
	uint16_t vsu_mode;
	uint16_t lb_mode;
	uint32_t val;
	int vskiplines = 0;

	if (dst_w > 3840) {
		DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
		return;
	}

	/* Scalers without the "ext" register block only take raw factors. */
	if (!win->phy->scl->ext) {
		VOP_SCL_SET(vop, win, scale_yrgb_x,
			    scl_cal_scale2(src_w, dst_w));
		VOP_SCL_SET(vop, win, scale_yrgb_y,
			    scl_cal_scale2(src_h, dst_h));
		if (is_yuv) {
			VOP_SCL_SET(vop, win, scale_cbcr_x,
				    scl_cal_scale2(cbcr_src_w, dst_w));
			VOP_SCL_SET(vop, win, scale_cbcr_y,
				    scl_cal_scale2(cbcr_src_h, dst_h));
		}
		return;
	}

	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);

	/* Line-buffer mode is sized by the widest line the scaler keeps. */
	if (is_yuv) {
		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
		if (cbcr_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
		else
			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
	} else {
		if (yrgb_hor_scl_mode == SCALE_DOWN)
			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
		else
			lb_mode = scl_vop_cal_lb_mode(src_w, false);
	}

	VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
	if (lb_mode == LB_RGB_3840X2) {
		if
(yrgb_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
			return;
		}
		if (cbcr_ver_scl_mode != SCALE_NONE) {
			DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
			return;
		}
		vsu_mode = SCALE_UP_BIL;
	} else if (lb_mode == LB_RGB_2560X4) {
		vsu_mode = SCALE_UP_BIL;
	} else {
		vsu_mode = SCALE_UP_BIC;
	}

	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
				true, 0, NULL);
	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
				false, vsu_mode, &vskiplines);
	VOP_SCL_SET(vop, win, scale_yrgb_y, val);

	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt4, vskiplines == 4);
	VOP_SCL_SET_EXT(vop, win, vsd_yrgb_gt2, vskiplines == 2);

	VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
	VOP_SCL_SET_EXT(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
	VOP_SCL_SET_EXT(vop, win, yrgb_vsu_mode, vsu_mode);
	if (is_yuv) {
		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
					dst_w, true, 0, NULL);
		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
					dst_h, false, vsu_mode, &vskiplines);
		VOP_SCL_SET(vop, win, scale_cbcr_y, val);

		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt4, vskiplines == 4);
		VOP_SCL_SET_EXT(vop, win, vsd_cbcr_gt2, vskiplines == 2);
		VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
		VOP_SCL_SET_EXT(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
		VOP_SCL_SET_EXT(vop, win, cbcr_vsu_mode, vsu_mode);
	}
}

/*
 * Arm the "dsp hold valid" interrupt, which fires once a requested standby
 * has taken effect (see vop_crtc_disable()/vop_crtc_enable()).
 */
static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

/*
 * (1) each frame starts at the start of the Vsync pulse which is signaled by
 *     the "FRAME_SYNC" interrupt.
 * (2) the active data region of each frame ends at dsp_vact_end
 * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num,
 *     to get "LINE_FLAG" interrupt at the end of the active on screen data.
 *
 * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
 * Interrupts
 * LINE_FLAG -------------------------------+
 * FRAME_SYNC ----+                         |
 *                |                         |
 *                v                         v
 *                | Vsync | Vbp |  Vactive  | Vfp |
 *                        ^     ^           ^     ^
 *                        |     |           |     |
 *                        |     |           |     |
 * dsp_vs_end ------------+     |           |     |   VOP_DSP_VTOTAL_VS_END
 * dsp_vact_start --------------+           |     |   VOP_DSP_VACT_ST_END
 * dsp_vact_end ----------------------------+     |   VOP_DSP_VACT_ST_END
 * dsp_total -------------------------------------+   VOP_DSP_VTOTAL_VS_END
 */
static bool vop_line_flag_irq_is_enabled(struct vop *vop)
{
	uint32_t line_flag_irq;
	unsigned long flags;

	spin_lock_irqsave(&vop->irq_lock, flags);

	line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return !!line_flag_irq;
}

static void vop_line_flag_irq_enable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock,
flags);

	VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

static void vop_line_flag_irq_disable(struct vop *vop)
{
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

/*
 * Power up the VOP: runtime PM, clocks, iommu mapping, restore the shadowed
 * register file and leave standby. Returns 0 or a negative errno; on error
 * every resource acquired so far is released again (goto-cleanup chain).
 */
static int vop_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	int ret, i;

	ret = pm_runtime_get_sync(vop->dev);
	if (ret < 0) {
		dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
		return ret;
	}

	ret = clk_enable(vop->hclk);
	if (WARN_ON(ret < 0))
		goto err_put_pm_runtime;

	ret = clk_enable(vop->dclk);
	if (WARN_ON(ret < 0))
		goto err_disable_hclk;

	ret = clk_enable(vop->aclk);
	if (WARN_ON(ret < 0))
		goto err_disable_dclk;

	/*
	 * Slave iommu shares power, irq and clock with vop. It was associated
	 * automatically with this master device via common driver code.
	 * Now that we have enabled the clock we attach it to the shared drm
	 * mapping.
	 */
	ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
	if (ret) {
		dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
		goto err_disable_aclk;
	}

	/* Restore the whole register file from the shadow copy. */
	memcpy(vop->regs, vop->regsbak, vop->len);
	/*
	 * We need to make sure that all windows are disabled before we
	 * enable the crtc. Otherwise we might try to scan from a destroyed
	 * buffer later.
	 */
	for (i = 0; i < vop->data->win_size; i++) {
		struct vop_win *vop_win = &vop->win[i];
		const struct vop_win_data *win = vop_win->data;

		spin_lock(&vop->reg_lock);
		VOP_WIN_SET(vop, win, enable, 0);
		spin_unlock(&vop->reg_lock);
	}

	vop_cfg_done(vop);

	/*
	 * At here, vop clock & iommu is enable, R/W vop regs would be safe.
	 */
	vop->is_enabled = true;

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 0);

	spin_unlock(&vop->reg_lock);

	enable_irq(vop->irq);

	drm_crtc_vblank_on(crtc);

	return 0;

err_disable_aclk:
	clk_disable(vop->aclk);
err_disable_dclk:
	clk_disable(vop->dclk);
err_disable_hclk:
	clk_disable(vop->hclk);
err_put_pm_runtime:
	pm_runtime_put_sync(vop->dev);
	return ret;
}

static void vop_crtc_disable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);

	WARN_ON(vop->event);

	rockchip_drm_psr_deactivate(&vop->crtc);

	drm_crtc_vblank_off(crtc);

	/*
	 * Vop standby will take effect at end of current frame,
	 * if dsp hold valid irq happen, it means standby complete.
	 *
	 * we must wait standby complete when we want to disable aclk,
	 * if not, memory bus maybe dead.
	 */
	reinit_completion(&vop->dsp_hold_completion);
	vop_dsp_hold_valid_irq_enable(vop);

	spin_lock(&vop->reg_lock);

	VOP_CTRL_SET(vop, standby, 1);

	spin_unlock(&vop->reg_lock);

	wait_for_completion(&vop->dsp_hold_completion);

	vop_dsp_hold_valid_irq_disable(vop);

	disable_irq(vop->irq);

	vop->is_enabled = false;

	/*
	 * vop standby complete, so iommu detach is safe.
616 */ 617 rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev); 618 619 clk_disable(vop->dclk); 620 clk_disable(vop->aclk); 621 clk_disable(vop->hclk); 622 pm_runtime_put(vop->dev); 623 624 if (crtc->state->event && !crtc->state->active) { 625 spin_lock_irq(&crtc->dev->event_lock); 626 drm_crtc_send_vblank_event(crtc, crtc->state->event); 627 spin_unlock_irq(&crtc->dev->event_lock); 628 629 crtc->state->event = NULL; 630 } 631 } 632 633 static void vop_plane_destroy(struct drm_plane *plane) 634 { 635 drm_plane_cleanup(plane); 636 } 637 638 static int vop_plane_atomic_check(struct drm_plane *plane, 639 struct drm_plane_state *state) 640 { 641 struct drm_crtc *crtc = state->crtc; 642 struct drm_crtc_state *crtc_state; 643 struct drm_framebuffer *fb = state->fb; 644 struct vop_win *vop_win = to_vop_win(plane); 645 const struct vop_win_data *win = vop_win->data; 646 int ret; 647 struct drm_rect clip; 648 int min_scale = win->phy->scl ? FRAC_16_16(1, 8) : 649 DRM_PLANE_HELPER_NO_SCALING; 650 int max_scale = win->phy->scl ? FRAC_16_16(8, 1) : 651 DRM_PLANE_HELPER_NO_SCALING; 652 653 if (!crtc || !fb) 654 return 0; 655 656 crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); 657 if (WARN_ON(!crtc_state)) 658 return -EINVAL; 659 660 clip.x1 = 0; 661 clip.y1 = 0; 662 clip.x2 = crtc_state->adjusted_mode.hdisplay; 663 clip.y2 = crtc_state->adjusted_mode.vdisplay; 664 665 ret = drm_plane_helper_check_state(state, &clip, 666 min_scale, max_scale, 667 true, true); 668 if (ret) 669 return ret; 670 671 if (!state->visible) 672 return 0; 673 674 ret = vop_convert_format(fb->format->format); 675 if (ret < 0) 676 return ret; 677 678 /* 679 * Src.x1 can be odd when do clip, but yuv plane start point 680 * need align with 2 pixel. 
681 */ 682 if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) { 683 DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n"); 684 return -EINVAL; 685 } 686 687 return 0; 688 } 689 690 static void vop_plane_atomic_disable(struct drm_plane *plane, 691 struct drm_plane_state *old_state) 692 { 693 struct vop_win *vop_win = to_vop_win(plane); 694 const struct vop_win_data *win = vop_win->data; 695 struct vop *vop = to_vop(old_state->crtc); 696 697 if (!old_state->crtc) 698 return; 699 700 spin_lock(&vop->reg_lock); 701 702 VOP_WIN_SET(vop, win, enable, 0); 703 704 spin_unlock(&vop->reg_lock); 705 } 706 707 static void vop_plane_atomic_update(struct drm_plane *plane, 708 struct drm_plane_state *old_state) 709 { 710 struct drm_plane_state *state = plane->state; 711 struct drm_crtc *crtc = state->crtc; 712 struct vop_win *vop_win = to_vop_win(plane); 713 const struct vop_win_data *win = vop_win->data; 714 struct vop *vop = to_vop(state->crtc); 715 struct drm_framebuffer *fb = state->fb; 716 unsigned int actual_w, actual_h; 717 unsigned int dsp_stx, dsp_sty; 718 uint32_t act_info, dsp_info, dsp_st; 719 struct drm_rect *src = &state->src; 720 struct drm_rect *dest = &state->dst; 721 struct drm_gem_object *obj, *uv_obj; 722 struct rockchip_gem_object *rk_obj, *rk_uv_obj; 723 unsigned long offset; 724 dma_addr_t dma_addr; 725 uint32_t val; 726 bool rb_swap; 727 int format; 728 729 /* 730 * can't update plane when vop is disabled. 
731 */ 732 if (WARN_ON(!crtc)) 733 return; 734 735 if (WARN_ON(!vop->is_enabled)) 736 return; 737 738 if (!state->visible) { 739 vop_plane_atomic_disable(plane, old_state); 740 return; 741 } 742 743 obj = rockchip_fb_get_gem_obj(fb, 0); 744 rk_obj = to_rockchip_obj(obj); 745 746 actual_w = drm_rect_width(src) >> 16; 747 actual_h = drm_rect_height(src) >> 16; 748 act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff); 749 750 dsp_info = (drm_rect_height(dest) - 1) << 16; 751 dsp_info |= (drm_rect_width(dest) - 1) & 0xffff; 752 753 dsp_stx = dest->x1 + crtc->mode.htotal - crtc->mode.hsync_start; 754 dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start; 755 dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff); 756 757 offset = (src->x1 >> 16) * fb->format->cpp[0]; 758 offset += (src->y1 >> 16) * fb->pitches[0]; 759 dma_addr = rk_obj->dma_addr + offset + fb->offsets[0]; 760 761 format = vop_convert_format(fb->format->format); 762 763 spin_lock(&vop->reg_lock); 764 765 VOP_WIN_SET(vop, win, format, format); 766 VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4)); 767 VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); 768 if (is_yuv_support(fb->format->format)) { 769 int hsub = drm_format_horz_chroma_subsampling(fb->format->format); 770 int vsub = drm_format_vert_chroma_subsampling(fb->format->format); 771 int bpp = fb->format->cpp[1]; 772 773 uv_obj = rockchip_fb_get_gem_obj(fb, 1); 774 rk_uv_obj = to_rockchip_obj(uv_obj); 775 776 offset = (src->x1 >> 16) * bpp / hsub; 777 offset += (src->y1 >> 16) * fb->pitches[1] / vsub; 778 779 dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1]; 780 VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4)); 781 VOP_WIN_SET(vop, win, uv_mst, dma_addr); 782 } 783 784 if (win->phy->scl) 785 scl_vop_cal_scl_fac(vop, win, actual_w, actual_h, 786 drm_rect_width(dest), drm_rect_height(dest), 787 fb->format->format); 788 789 VOP_WIN_SET(vop, win, act_info, act_info); 790 VOP_WIN_SET(vop, win, dsp_info, dsp_info); 791 
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

	rb_swap = has_rb_swapped(fb->format->format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	/* Per-pixel alpha for ARGB/ABGR; otherwise disable source alpha. */
	if (is_alpha_support(fb->format->format)) {
		VOP_WIN_SET(vop, win, dst_alpha_ctl,
			    DST_FACTOR_M0(ALPHA_SRC_INVERSE));
		val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
			SRC_ALPHA_M0(ALPHA_STRAIGHT) |
			SRC_BLEND_M0(ALPHA_PER_PIX) |
			SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
			SRC_FACTOR_M0(ALPHA_ONE);
		VOP_WIN_SET(vop, win, src_alpha_ctl, val);
	} else {
		VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
	}

	VOP_WIN_SET(vop, win, enable, 1);
	spin_unlock(&vop->reg_lock);
}

static const struct drm_plane_helper_funcs plane_helper_funcs = {
	.atomic_check = vop_plane_atomic_check,
	.atomic_update = vop_plane_atomic_update,
	.atomic_disable = vop_plane_atomic_disable,
};

static const struct drm_plane_funcs vop_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy = vop_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

/* Enable delivery of frame-start (FS) interrupts, used as vblank events. */
static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return -EPERM;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);

	spin_unlock_irqrestore(&vop->irq_lock, flags);

	return 0;
}

static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	unsigned long flags;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock_irqsave(&vop->irq_lock, flags);

	VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0);

	spin_unlock_irqrestore(&vop->irq_lock, flags);
}

/* Snap the requested pixel clock to what dclk can actually produce. */
static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct vop *vop = to_vop(crtc);

	adjusted_mode->clock =
		clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;

	return true;
}

/*
 * Full modeset: power the VOP up, enter standby while the timing registers
 * are reprogrammed (they take effect immediately on RK3288), route the
 * output to the connector-specific encoder and program the display timings.
 */
static void vop_crtc_enable(struct drm_crtc *crtc)
{
	struct vop *vop = to_vop(crtc);
	const struct vop_data *vop_data = vop->data;
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
	u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
	u16 hdisplay = adjusted_mode->hdisplay;
	u16 htotal = adjusted_mode->htotal;
	u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
	u16 hact_end = hact_st + hdisplay;
	u16 vdisplay = adjusted_mode->vdisplay;
	u16 vtotal = adjusted_mode->vtotal;
	u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
	u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
	u16 vact_end = vact_st + vdisplay;
	uint32_t pin_pol, val;
	int ret;

	WARN_ON(vop->event);

	ret = vop_enable(crtc);
	if (ret) {
		DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
		return;
	}

	/*
	 * If dclk rate is zero, mean that scanout is stop,
	 * we don't need wait any more.
	 */
	if (clk_get_rate(vop->dclk)) {
		/*
		 * Rk3288 vop timing register is immediately, when configure
		 * display timing on display time, may cause tearing.
		 *
		 * Vop standby will take effect at end of current frame,
		 * if dsp hold valid irq happen, it means standby complete.
		 *
		 * mode set:
		 *    standby and wait complete --> |----
		 *                                  | display time
		 *                                  |----
		 *                                  |---> dsp hold irq
		 *     configure display timing --> |
		 *         standby exit             |
		 *                                  | new frame start.
		 */

		reinit_completion(&vop->dsp_hold_completion);
		vop_dsp_hold_valid_irq_enable(vop);

		spin_lock(&vop->reg_lock);

		VOP_CTRL_SET(vop, standby, 1);

		spin_unlock(&vop->reg_lock);

		wait_for_completion(&vop->dsp_hold_completion);

		vop_dsp_hold_valid_irq_disable(vop);
	}

	pin_pol = BIT(DCLK_INVERT);
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
		   BIT(HSYNC_POSITIVE) : 0;
	pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
		   BIT(VSYNC_POSITIVE) : 0;
	VOP_CTRL_SET(vop, pin_pol, pin_pol);

	/* Route the scanout to the encoder matching the connector type. */
	switch (s->output_type) {
	case DRM_MODE_CONNECTOR_LVDS:
		VOP_CTRL_SET(vop, rgb_en, 1);
		VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol);
		break;
	case DRM_MODE_CONNECTOR_eDP:
		VOP_CTRL_SET(vop, edp_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, edp_en, 1);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, hdmi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DSI:
		VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, mipi_en, 1);
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		pin_pol &= ~BIT(DCLK_INVERT);
		VOP_CTRL_SET(vop, dp_pin_pol, pin_pol);
		VOP_CTRL_SET(vop, dp_en, 1);
		break;
	default:
		DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
			      s->output_type);
	}

	/*
	 * if vop is not support RGB10 output, need force RGB10 to RGB888.
	 */
	if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
	    !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
		s->output_mode = ROCKCHIP_OUT_MODE_P888;
	VOP_CTRL_SET(vop, out_mode, s->output_mode);

	VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
	val = hact_st << 16;
	val |= hact_end;
	VOP_CTRL_SET(vop, hact_st_end, val);
	VOP_CTRL_SET(vop, hpost_st_end, val);

	VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
	val = vact_st << 16;
	val |= vact_end;
	VOP_CTRL_SET(vop, vact_st_end, val);
	VOP_CTRL_SET(vop, vpost_st_end, val);

	/* Fire LINE_FLAG at the end of the active region, see diagram above. */
	VOP_CTRL_SET(vop, line_flag_num[0], vact_end);

	clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);

	VOP_CTRL_SET(vop, standby, 0);

	rockchip_drm_psr_activate(&vop->crtc);
}

static bool vop_fs_irq_is_pending(struct vop *vop)
{
	return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
}

static void vop_wait_for_irq_handler(struct vop *vop)
{
	bool pending;
	int ret;

	/*
	 * Spin until frame start interrupt status bit goes low, which means
	 * that interrupt handler was invoked and cleared it. The timeout of
	 * 10 msecs is really too long, but it is just a safety measure if
	 * something goes really wrong. The wait will only happen in the very
	 * unlikely case of a vblank happening exactly at the same time and
	 * shouldn't exceed microseconds range.
	 */
	ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
					!pending, 0, 10 * 1000);
	if (ret)
		DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");

	synchronize_irq(vop->irq);
}

/*
 * Commit all shadowed plane/crtc register writes (cfg_done), hand the
 * pending flip event to the IRQ path, and queue deferred unrefs for
 * framebuffers that just stopped being scanned out.
 */
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct drm_atomic_state *old_state = old_crtc_state->state;
	struct drm_plane_state *old_plane_state;
	struct vop *vop = to_vop(crtc);
	struct drm_plane *plane;
	int i;

	if (WARN_ON(!vop->is_enabled))
		return;

	spin_lock(&vop->reg_lock);

	vop_cfg_done(vop);

	spin_unlock(&vop->reg_lock);

	/*
	 * There is a (rather unlikely) possiblity that a vblank interrupt
	 * fired before we set the cfg_done bit. To avoid spuriously
	 * signalling flip completion we need to wait for it to finish.
	 */
	vop_wait_for_irq_handler(vop);

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		WARN_ON(vop->event);

		vop->event = crtc->state->event;
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);

	for_each_plane_in_state(old_state, plane, old_plane_state, i) {
		if (!old_plane_state->fb)
			continue;

		if (old_plane_state->fb == plane->state->fb)
			continue;

		/*
		 * Hold the old fb (and a vblank reference) until the next
		 * vblank confirms the hardware stopped scanning from it;
		 * vop_fb_unref_worker() drops both.
		 */
		drm_framebuffer_reference(old_plane_state->fb);
		drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
		set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
	}
}

static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	rockchip_drm_psr_flush(crtc);
}

static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
	.enable = vop_crtc_enable,
	.disable = vop_crtc_disable,
	.mode_fixup =
vop_crtc_mode_fixup,
	.atomic_flush = vop_crtc_atomic_flush,
	.atomic_begin = vop_crtc_atomic_begin,
};

static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}

/* Reset the CRTC to a zeroed driver-private state (rockchip_crtc_state). */
static void vop_crtc_reset(struct drm_crtc *crtc)
{
	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
	kfree(crtc->state);

	crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
	if (crtc->state)
		crtc->state->crtc = crtc;
}

static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct rockchip_crtc_state *rockchip_state;

	rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
	if (!rockchip_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
	return &rockchip_state->base;
}

static void vop_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(&s->base);
	kfree(s);
}

#ifdef CONFIG_DRM_ANALOGIX_DP
/* Find the first eDP connector on this device, or NULL if there is none. */
static struct drm_connector *vop_get_edp_connector(struct vop *vop)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(vop->drm_dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
			drm_connector_list_iter_end(&conn_iter);
			return connector;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return NULL;
}

/*
 * debugfs CRC source control, backed by the analogix eDP bridge:
 * "auto" starts CRC capture, NULL stops it, anything else is invalid.
 */
static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
				   const char *source_name, size_t *values_cnt)
{
	struct vop *vop = to_vop(crtc);
	struct drm_connector *connector;
	int ret;

	connector = vop_get_edp_connector(vop);
	if (!connector)
		return -EINVAL;

	*values_cnt = 3;

	if (source_name && strcmp(source_name, "auto") == 0)
		ret = analogix_dp_start_crc(connector);
	else if (!source_name)
		ret = analogix_dp_stop_crc(connector);
	else
		ret = -EINVAL;

	return ret;
}
#else
static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
				   const char *source_name, size_t *values_cnt)
{
	return -ENODEV;
}
#endif

static const struct drm_crtc_funcs vop_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = vop_crtc_destroy,
	.reset = vop_crtc_reset,
	.atomic_duplicate_state = vop_crtc_duplicate_state,
	.atomic_destroy_state = vop_crtc_destroy_state,
	.enable_vblank = vop_crtc_enable_vblank,
	.disable_vblank = vop_crtc_disable_vblank,
	.set_crc_source = vop_crtc_set_crc_source,
};

/*
 * flip_work callback: drop the framebuffer and vblank references taken in
 * vop_crtc_atomic_flush() once the flip is known to have completed.
 */
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
{
	struct vop *vop = container_of(work, struct vop, fb_unref_work);
	struct drm_framebuffer *fb = val;

	drm_crtc_vblank_put(&vop->crtc);
	drm_framebuffer_unreference(fb);
}

/* Deliver a pending flip event and commit queued framebuffer unrefs. */
static void vop_handle_vblank(struct vop *vop)
{
	struct drm_device *drm = vop->drm_dev;
	struct drm_crtc *crtc = &vop->crtc;
	unsigned long flags;

	spin_lock_irqsave(&drm->event_lock, flags);
	if (vop->event) {
		drm_crtc_send_vblank_event(crtc, vop->event);
		drm_crtc_vblank_put(crtc);
		vop->event = NULL;
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);

	if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
		drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
}

static irqreturn_t vop_isr(int irq, void *data)
{
	struct vop *vop = data;
	struct drm_crtc *crtc = &vop->crtc;
	uint32_t active_irqs;
	unsigned long
flags; 1218 int ret = IRQ_NONE; 1219 1220 /* 1221 * interrupt register has interrupt status, enable and clear bits, we 1222 * must hold irq_lock to avoid a race with enable/disable_vblank(). 1223 */ 1224 spin_lock_irqsave(&vop->irq_lock, flags); 1225 1226 active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK); 1227 /* Clear all active interrupt sources */ 1228 if (active_irqs) 1229 VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1); 1230 1231 spin_unlock_irqrestore(&vop->irq_lock, flags); 1232 1233 /* This is expected for vop iommu irqs, since the irq is shared */ 1234 if (!active_irqs) 1235 return IRQ_NONE; 1236 1237 if (active_irqs & DSP_HOLD_VALID_INTR) { 1238 complete(&vop->dsp_hold_completion); 1239 active_irqs &= ~DSP_HOLD_VALID_INTR; 1240 ret = IRQ_HANDLED; 1241 } 1242 1243 if (active_irqs & LINE_FLAG_INTR) { 1244 complete(&vop->line_flag_completion); 1245 active_irqs &= ~LINE_FLAG_INTR; 1246 ret = IRQ_HANDLED; 1247 } 1248 1249 if (active_irqs & FS_INTR) { 1250 drm_crtc_handle_vblank(crtc); 1251 vop_handle_vblank(vop); 1252 active_irqs &= ~FS_INTR; 1253 ret = IRQ_HANDLED; 1254 } 1255 1256 /* Unhandled irqs are spurious. */ 1257 if (active_irqs) 1258 DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n", 1259 active_irqs); 1260 1261 return ret; 1262 } 1263 1264 static int vop_create_crtc(struct vop *vop) 1265 { 1266 const struct vop_data *vop_data = vop->data; 1267 struct device *dev = vop->dev; 1268 struct drm_device *drm_dev = vop->drm_dev; 1269 struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp; 1270 struct drm_crtc *crtc = &vop->crtc; 1271 struct device_node *port; 1272 int ret; 1273 int i; 1274 1275 /* 1276 * Create drm_plane for primary and cursor planes first, since we need 1277 * to pass them to drm_crtc_init_with_planes, which sets the 1278 * "possible_crtcs" to the newly initialized crtc. 
1279 */ 1280 for (i = 0; i < vop_data->win_size; i++) { 1281 struct vop_win *vop_win = &vop->win[i]; 1282 const struct vop_win_data *win_data = vop_win->data; 1283 1284 if (win_data->type != DRM_PLANE_TYPE_PRIMARY && 1285 win_data->type != DRM_PLANE_TYPE_CURSOR) 1286 continue; 1287 1288 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base, 1289 0, &vop_plane_funcs, 1290 win_data->phy->data_formats, 1291 win_data->phy->nformats, 1292 win_data->type, NULL); 1293 if (ret) { 1294 DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n", 1295 ret); 1296 goto err_cleanup_planes; 1297 } 1298 1299 plane = &vop_win->base; 1300 drm_plane_helper_add(plane, &plane_helper_funcs); 1301 if (plane->type == DRM_PLANE_TYPE_PRIMARY) 1302 primary = plane; 1303 else if (plane->type == DRM_PLANE_TYPE_CURSOR) 1304 cursor = plane; 1305 } 1306 1307 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 1308 &vop_crtc_funcs, NULL); 1309 if (ret) 1310 goto err_cleanup_planes; 1311 1312 drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs); 1313 1314 /* 1315 * Create drm_planes for overlay windows with possible_crtcs restricted 1316 * to the newly created crtc. 
1317 */ 1318 for (i = 0; i < vop_data->win_size; i++) { 1319 struct vop_win *vop_win = &vop->win[i]; 1320 const struct vop_win_data *win_data = vop_win->data; 1321 unsigned long possible_crtcs = 1 << drm_crtc_index(crtc); 1322 1323 if (win_data->type != DRM_PLANE_TYPE_OVERLAY) 1324 continue; 1325 1326 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base, 1327 possible_crtcs, 1328 &vop_plane_funcs, 1329 win_data->phy->data_formats, 1330 win_data->phy->nformats, 1331 win_data->type, NULL); 1332 if (ret) { 1333 DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n", 1334 ret); 1335 goto err_cleanup_crtc; 1336 } 1337 drm_plane_helper_add(&vop_win->base, &plane_helper_funcs); 1338 } 1339 1340 port = of_get_child_by_name(dev->of_node, "port"); 1341 if (!port) { 1342 DRM_DEV_ERROR(vop->dev, "no port node found in %s\n", 1343 dev->of_node->full_name); 1344 ret = -ENOENT; 1345 goto err_cleanup_crtc; 1346 } 1347 1348 drm_flip_work_init(&vop->fb_unref_work, "fb_unref", 1349 vop_fb_unref_worker); 1350 1351 init_completion(&vop->dsp_hold_completion); 1352 init_completion(&vop->line_flag_completion); 1353 crtc->port = port; 1354 1355 return 0; 1356 1357 err_cleanup_crtc: 1358 drm_crtc_cleanup(crtc); 1359 err_cleanup_planes: 1360 list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list, 1361 head) 1362 drm_plane_cleanup(plane); 1363 return ret; 1364 } 1365 1366 static void vop_destroy_crtc(struct vop *vop) 1367 { 1368 struct drm_crtc *crtc = &vop->crtc; 1369 struct drm_device *drm_dev = vop->drm_dev; 1370 struct drm_plane *plane, *tmp; 1371 1372 of_node_put(crtc->port); 1373 1374 /* 1375 * We need to cleanup the planes now. Why? 1376 * 1377 * The planes are "&vop->win[i].base". That means the memory is 1378 * all part of the big "struct vop" chunk of memory. That memory 1379 * was devm allocated and associated with this component. We need to 1380 * free it ourselves before vop_unbind() finishes. 
1381 */ 1382 list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list, 1383 head) 1384 vop_plane_destroy(plane); 1385 1386 /* 1387 * Destroy CRTC after vop_plane_destroy() since vop_disable_plane() 1388 * references the CRTC. 1389 */ 1390 drm_crtc_cleanup(crtc); 1391 drm_flip_work_cleanup(&vop->fb_unref_work); 1392 } 1393 1394 static int vop_initial(struct vop *vop) 1395 { 1396 const struct vop_data *vop_data = vop->data; 1397 const struct vop_reg_data *init_table = vop_data->init_table; 1398 struct reset_control *ahb_rst; 1399 int i, ret; 1400 1401 vop->hclk = devm_clk_get(vop->dev, "hclk_vop"); 1402 if (IS_ERR(vop->hclk)) { 1403 dev_err(vop->dev, "failed to get hclk source\n"); 1404 return PTR_ERR(vop->hclk); 1405 } 1406 vop->aclk = devm_clk_get(vop->dev, "aclk_vop"); 1407 if (IS_ERR(vop->aclk)) { 1408 dev_err(vop->dev, "failed to get aclk source\n"); 1409 return PTR_ERR(vop->aclk); 1410 } 1411 vop->dclk = devm_clk_get(vop->dev, "dclk_vop"); 1412 if (IS_ERR(vop->dclk)) { 1413 dev_err(vop->dev, "failed to get dclk source\n"); 1414 return PTR_ERR(vop->dclk); 1415 } 1416 1417 ret = pm_runtime_get_sync(vop->dev); 1418 if (ret < 0) { 1419 dev_err(vop->dev, "failed to get pm runtime: %d\n", ret); 1420 return ret; 1421 } 1422 1423 ret = clk_prepare(vop->dclk); 1424 if (ret < 0) { 1425 dev_err(vop->dev, "failed to prepare dclk\n"); 1426 goto err_put_pm_runtime; 1427 } 1428 1429 /* Enable both the hclk and aclk to setup the vop */ 1430 ret = clk_prepare_enable(vop->hclk); 1431 if (ret < 0) { 1432 dev_err(vop->dev, "failed to prepare/enable hclk\n"); 1433 goto err_unprepare_dclk; 1434 } 1435 1436 ret = clk_prepare_enable(vop->aclk); 1437 if (ret < 0) { 1438 dev_err(vop->dev, "failed to prepare/enable aclk\n"); 1439 goto err_disable_hclk; 1440 } 1441 1442 /* 1443 * do hclk_reset, reset all vop registers. 
1444 */ 1445 ahb_rst = devm_reset_control_get(vop->dev, "ahb"); 1446 if (IS_ERR(ahb_rst)) { 1447 dev_err(vop->dev, "failed to get ahb reset\n"); 1448 ret = PTR_ERR(ahb_rst); 1449 goto err_disable_aclk; 1450 } 1451 reset_control_assert(ahb_rst); 1452 usleep_range(10, 20); 1453 reset_control_deassert(ahb_rst); 1454 1455 memcpy(vop->regsbak, vop->regs, vop->len); 1456 1457 for (i = 0; i < vop_data->table_size; i++) 1458 vop_writel(vop, init_table[i].offset, init_table[i].value); 1459 1460 for (i = 0; i < vop_data->win_size; i++) { 1461 const struct vop_win_data *win = &vop_data->win[i]; 1462 1463 VOP_WIN_SET(vop, win, enable, 0); 1464 } 1465 1466 vop_cfg_done(vop); 1467 1468 /* 1469 * do dclk_reset, let all config take affect. 1470 */ 1471 vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk"); 1472 if (IS_ERR(vop->dclk_rst)) { 1473 dev_err(vop->dev, "failed to get dclk reset\n"); 1474 ret = PTR_ERR(vop->dclk_rst); 1475 goto err_disable_aclk; 1476 } 1477 reset_control_assert(vop->dclk_rst); 1478 usleep_range(10, 20); 1479 reset_control_deassert(vop->dclk_rst); 1480 1481 clk_disable(vop->hclk); 1482 clk_disable(vop->aclk); 1483 1484 vop->is_enabled = false; 1485 1486 pm_runtime_put_sync(vop->dev); 1487 1488 return 0; 1489 1490 err_disable_aclk: 1491 clk_disable_unprepare(vop->aclk); 1492 err_disable_hclk: 1493 clk_disable_unprepare(vop->hclk); 1494 err_unprepare_dclk: 1495 clk_unprepare(vop->dclk); 1496 err_put_pm_runtime: 1497 pm_runtime_put_sync(vop->dev); 1498 return ret; 1499 } 1500 1501 /* 1502 * Initialize the vop->win array elements. 
1503 */ 1504 static void vop_win_init(struct vop *vop) 1505 { 1506 const struct vop_data *vop_data = vop->data; 1507 unsigned int i; 1508 1509 for (i = 0; i < vop_data->win_size; i++) { 1510 struct vop_win *vop_win = &vop->win[i]; 1511 const struct vop_win_data *win_data = &vop_data->win[i]; 1512 1513 vop_win->data = win_data; 1514 vop_win->vop = vop; 1515 } 1516 } 1517 1518 /** 1519 * rockchip_drm_wait_vact_end 1520 * @crtc: CRTC to enable line flag 1521 * @mstimeout: millisecond for timeout 1522 * 1523 * Wait for vact_end line flag irq or timeout. 1524 * 1525 * Returns: 1526 * Zero on success, negative errno on failure. 1527 */ 1528 int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout) 1529 { 1530 struct vop *vop = to_vop(crtc); 1531 unsigned long jiffies_left; 1532 1533 if (!crtc || !vop->is_enabled) 1534 return -ENODEV; 1535 1536 if (mstimeout <= 0) 1537 return -EINVAL; 1538 1539 if (vop_line_flag_irq_is_enabled(vop)) 1540 return -EBUSY; 1541 1542 reinit_completion(&vop->line_flag_completion); 1543 vop_line_flag_irq_enable(vop); 1544 1545 jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion, 1546 msecs_to_jiffies(mstimeout)); 1547 vop_line_flag_irq_disable(vop); 1548 1549 if (jiffies_left == 0) { 1550 dev_err(vop->dev, "Timeout waiting for IRQ\n"); 1551 return -ETIMEDOUT; 1552 } 1553 1554 return 0; 1555 } 1556 EXPORT_SYMBOL(rockchip_drm_wait_vact_end); 1557 1558 static int vop_bind(struct device *dev, struct device *master, void *data) 1559 { 1560 struct platform_device *pdev = to_platform_device(dev); 1561 const struct vop_data *vop_data; 1562 struct drm_device *drm_dev = data; 1563 struct vop *vop; 1564 struct resource *res; 1565 size_t alloc_size; 1566 int ret, irq; 1567 1568 vop_data = of_device_get_match_data(dev); 1569 if (!vop_data) 1570 return -ENODEV; 1571 1572 /* Allocate vop struct and its vop_win array */ 1573 alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size; 1574 vop = devm_kzalloc(dev, 
alloc_size, GFP_KERNEL); 1575 if (!vop) 1576 return -ENOMEM; 1577 1578 vop->dev = dev; 1579 vop->data = vop_data; 1580 vop->drm_dev = drm_dev; 1581 dev_set_drvdata(dev, vop); 1582 1583 vop_win_init(vop); 1584 1585 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1586 vop->len = resource_size(res); 1587 vop->regs = devm_ioremap_resource(dev, res); 1588 if (IS_ERR(vop->regs)) 1589 return PTR_ERR(vop->regs); 1590 1591 vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL); 1592 if (!vop->regsbak) 1593 return -ENOMEM; 1594 1595 irq = platform_get_irq(pdev, 0); 1596 if (irq < 0) { 1597 dev_err(dev, "cannot find irq for vop\n"); 1598 return irq; 1599 } 1600 vop->irq = (unsigned int)irq; 1601 1602 spin_lock_init(&vop->reg_lock); 1603 spin_lock_init(&vop->irq_lock); 1604 1605 mutex_init(&vop->vsync_mutex); 1606 1607 ret = devm_request_irq(dev, vop->irq, vop_isr, 1608 IRQF_SHARED, dev_name(dev), vop); 1609 if (ret) 1610 return ret; 1611 1612 /* IRQ is initially disabled; it gets enabled in power_on */ 1613 disable_irq(vop->irq); 1614 1615 ret = vop_create_crtc(vop); 1616 if (ret) 1617 goto err_enable_irq; 1618 1619 pm_runtime_enable(&pdev->dev); 1620 1621 ret = vop_initial(vop); 1622 if (ret < 0) { 1623 dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret); 1624 goto err_disable_pm_runtime; 1625 } 1626 1627 return 0; 1628 1629 err_disable_pm_runtime: 1630 pm_runtime_disable(&pdev->dev); 1631 vop_destroy_crtc(vop); 1632 err_enable_irq: 1633 enable_irq(vop->irq); /* To balance out the disable_irq above */ 1634 return ret; 1635 } 1636 1637 static void vop_unbind(struct device *dev, struct device *master, void *data) 1638 { 1639 struct vop *vop = dev_get_drvdata(dev); 1640 1641 pm_runtime_disable(dev); 1642 vop_destroy_crtc(vop); 1643 1644 clk_unprepare(vop->aclk); 1645 clk_unprepare(vop->hclk); 1646 clk_unprepare(vop->dclk); 1647 } 1648 1649 const struct component_ops vop_component_ops = { 1650 .bind = vop_bind, 1651 .unbind = vop_unbind, 1652 }; 1653 
EXPORT_SYMBOL_GPL(vop_component_ops); 1654