// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/mailbox_controller.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>

#include <asm/barrier.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"

/*
 * struct mtk_drm_crtc - MediaTek specific crtc structure.
 * @base: crtc object.
 * @enabled: records whether crtc_enable succeeded
 * @pending_needs_vblank: whether a page flip completion is pending for the
 *			  next vblank
 * @event: pending vblank event, sent once the flip has been applied
 * @planes: array of layer_nr drm_plane structures, one for each layer
 * @layer_nr: number of planes in @planes
 * @pending_planes: whether any plane has pending changes to be applied
 * @pending_async_planes: whether any plane has pending asynchronous changes
 *			  to be applied
 * @cmdq_client: mailbox client used to flush configuration through GCE
 * @cmdq_handle: command packet reused for every configuration flush
 * @cmdq_event: GCE event this crtc waits on before applying a configuration
 * @cmdq_vblank_cnt: countdown used to detect CMDQ execution timeout
 * @mmsys_dev: pointer to the mmsys device for configuration registers
 * @mutex: handle to one of the ten disp_mutex streams
 * @ddp_comp_nr: number of components in ddp_comp
 * @ddp_comp: array of pointers to the mtk_ddp_comp structures used by this
 *	      crtc
 * @hw_lock: lock for display hardware access
 * @config_updating: whether a configuration update is in progress
 */
struct mtk_drm_crtc {
	struct drm_crtc			base;
	bool				enabled;

	bool				pending_needs_vblank;
	struct drm_pending_vblank_event	*event;

	struct drm_plane		*planes;
	unsigned int			layer_nr;
	bool				pending_planes;
	bool				pending_async_planes;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_client		cmdq_client;
	struct cmdq_pkt			cmdq_handle;
	u32				cmdq_event;
	u32				cmdq_vblank_cnt;
#endif

	struct device			*mmsys_dev;
	struct mtk_mutex		*mutex;
	unsigned int			ddp_comp_nr;
	struct mtk_ddp_comp		**ddp_comp;

	/* lock for display hardware access */
	struct mutex			hw_lock;
	bool				config_updating;
};

struct mtk_crtc_state {
	struct drm_crtc_state		base;

	bool				pending_config;
	unsigned int			pending_width;
	unsigned int			pending_height;
	unsigned int			pending_vrefresh;
};

static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
{
	return container_of(c, struct mtk_drm_crtc, base);
}

static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct mtk_crtc_state, base);
}

static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
	drm_crtc_vblank_put(crtc);
	mtk_crtc->event = NULL;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}

static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	drm_crtc_handle_vblank(&mtk_crtc->base);
	if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
		mtk_drm_crtc_finish_page_flip(mtk_crtc);
		mtk_crtc->pending_needs_vblank = false;
	}
}

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
				   size_t size)
{
	struct device *dev;
	dma_addr_t dma_addr;

	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base)
		return -ENOMEM;
	pkt->buf_size = size;
	pkt->cl = (void *)client;
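
	/*
	 * Map the command buffer once here; mtk_drm_crtc_update_config()
	 * only has to dma_sync_single_for_device() the same mapping before
	 * each flush.
	 */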
	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		return -ENOMEM;
	}

	pkt->pa_base = dma_addr;

	return 0;
}

static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
}
#endif
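
/*
 * Unlike cmdq_pkt_create(), the helpers above manage only the command
 * buffer and its DMA mapping: the cmdq_pkt itself is embedded in struct
 * mtk_drm_crtc and is reused, with its write offset rewound, for every
 * configuration flush.
 */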

static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client.chan) {
		mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);
		mbox_free_channel(mtk_crtc->cmdq_client.chan);
		mtk_crtc->cmdq_client.chan = NULL;
	}
#endif
	drm_crtc_cleanup(crtc);
}

static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

	kfree(to_mtk_crtc_state(crtc->state));
	crtc->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	WARN_ON(state->base.crtc != crtc);
	state->base.crtc = crtc;

	return &state->base;
}

static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_mtk_crtc_state(state));
}

static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	/* Nothing to do here, but this callback is mandatory. */
	return true;
}

static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);

	state->pending_width = crtc->mode.hdisplay;
	state->pending_height = crtc->mode.vdisplay;
	state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
	wmb();	/* Make sure the above parameters are set before update */
	state->pending_config = true;
}

static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
{
	int ret;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
		if (ret) {
			DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= 0)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
	return ret;
}

static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
{
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
}
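
/*
 * The crtc's planes are indexed across all path components in order, so the
 * component owning a plane is found by walking the path and subtracting each
 * component's layer count. As an illustration (numbers are hypothetical):
 * with two OVLs exposing four layers each, plane index 5 resolves to the
 * second OVL with local layer 1.
 */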
static
struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
						struct drm_plane *plane,
						unsigned int *local_layer)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp;
	int i, count = 0;
	unsigned int local_index = plane - mtk_crtc->planes;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		comp = mtk_crtc->ddp_comp[i];
		if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
			*local_layer = local_index - count;
			return comp;
		}
		count += mtk_ddp_comp_layer_nr(comp);
	}

	WARN(1, "Failed to find component for plane %d\n", plane->index);
	return NULL;
}

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
	struct cmdq_cb_data *data = mssg;
	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
	struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
	struct mtk_crtc_state *state;
	unsigned int i;

	if (data->sta < 0)
		return;

	state = to_mtk_crtc_state(mtk_crtc->base.state);

	state->pending_config = false;

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.config = false;
		}
		mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.async_config = false;
		}
		mtk_crtc->pending_async_planes = false;
	}

	mtk_crtc->cmdq_vblank_cnt = 0;
}
#endif
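
/*
 * Bring up the whole display path of this crtc: resume the power domain,
 * enable the mutex and component clocks, connect the components in path
 * order through mmsys, then configure and start each component and push the
 * initial state of every plane.
 */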
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_connector_list_iter conn_iter;
	unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
	int ret;
	int i;

	if (WARN_ON(!crtc->state))
		return -EINVAL;

	width = crtc->state->adjusted_mode.hdisplay;
	height = crtc->state->adjusted_mode.vdisplay;
	vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);

	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->encoder != encoder)
				continue;
			if (connector->display_info.bpc != 0 &&
			    bpc > connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	ret = pm_runtime_resume_and_get(crtc->dev->dev);
	if (ret < 0) {
		DRM_ERROR("Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = mtk_mutex_prepare(mtk_crtc->mutex);
	if (ret < 0) {
		DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
		goto err_pm_runtime_put;
	}

	ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
	if (ret < 0) {
		DRM_ERROR("Failed to enable component clocks: %d\n", ret);
		goto err_mutex_unprepare;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
				      mtk_crtc->ddp_comp[i]->id,
				      mtk_crtc->ddp_comp[i + 1]->id);
		mtk_mutex_add_comp(mtk_crtc->mutex,
				   mtk_crtc->ddp_comp[i]->id);
	}
	mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_enable(mtk_crtc->mutex);

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];

		if (i == 1)
			mtk_ddp_comp_bgclr_in_on(comp);

		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
		mtk_ddp_comp_start(comp);
	}

	/* Initially configure all planes */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;
		struct mtk_ddp_comp *comp;
		unsigned int local_layer;

		plane_state = to_mtk_plane_state(plane->state);
		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
		if (comp)
			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state, NULL);
	}

	return 0;

err_mutex_unprepare:
	mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
	pm_runtime_put(crtc->dev->dev);
	return ret;
}

static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_device *drm = mtk_crtc->base.dev;
	struct drm_crtc *crtc = &mtk_crtc->base;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
		if (i == 1)
			mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_mutex_remove_comp(mtk_crtc->mutex,
				      mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_disable(mtk_crtc->mutex);
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
					 mtk_crtc->ddp_comp[i]->id,
					 mtk_crtc->ddp_comp[i + 1]->id);
		mtk_mutex_remove_comp(mtk_crtc->mutex,
				      mtk_crtc->ddp_comp[i]->id);
	}
	mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_crtc_ddp_clk_disable(mtk_crtc);
	mtk_mutex_unprepare(mtk_crtc->mutex);

	pm_runtime_put(drm->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}

static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
				struct cmdq_pkt *cmdq_handle)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	unsigned int i;
	unsigned int local_layer;

	/*
	 * TODO: instead of updating the registers here, we should prepare
	 * working registers in atomic_commit and let the hardware command
	 * queue update module registers on vblank.
	 */
	if (state->pending_config) {
		mtk_ddp_comp_config(comp, state->pending_width,
				    state->pending_height,
				    state->pending_vrefresh, 0,
				    cmdq_handle);

		if (!cmdq_handle)
			state->pending_config = false;
	}

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.async_config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.async_config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_async_planes = false;
	}
}
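
/*
 * config_updating is set under hw_lock for the whole update and checked in
 * mtk_drm_finish_page_flip(), so a vblank interrupt that arrives in the
 * middle of an update does not complete a page flip against a half-written
 * configuration.
 */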
static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
				       bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	unsigned int pending_planes = 0, pending_async_planes = 0;
	int i;

	mutex_lock(&mtk_crtc->hw_lock);
	mtk_crtc->config_updating = true;
	if (needs_vblank)
		mtk_crtc->pending_needs_vblank = true;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		if (plane_state->pending.dirty) {
			plane_state->pending.config = true;
			plane_state->pending.dirty = false;
			pending_planes |= BIT(i);
		} else if (plane_state->pending.async_dirty) {
			plane_state->pending.async_config = true;
			plane_state->pending.async_dirty = false;
			pending_async_planes |= BIT(i);
		}
	}
	if (pending_planes)
		mtk_crtc->pending_planes = true;
	if (pending_async_planes)
		mtk_crtc->pending_async_planes = true;

	if (priv->data->shadow_register) {
		mtk_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc, NULL);
		mtk_mutex_release(mtk_crtc->mutex);
	}
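
	/*
	 * With a GCE channel available, replay the same configuration into
	 * the reused command packet: flush any stale mailbox message, rewind
	 * the packet's write pointer, and make the packet first clear and
	 * then wait for this crtc's GCE event before writing the registers.
	 * ddp_cmdq_cb() clears the pending flags once the packet has
	 * executed.
	 */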
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client.chan) {
		mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
		cmdq_handle->cmd_buf_size = 0;
		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
		mtk_crtc_ddp_config(crtc, cmdq_handle);
		cmdq_pkt_finalize(cmdq_handle);
		dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
					   cmdq_handle->pa_base,
					   cmdq_handle->cmd_buf_size,
					   DMA_TO_DEVICE);
		/*
		 * The CMDQ command should execute within the next three
		 * vblanks: one vblank interrupt may fire before the message
		 * is sent (occasionally), and one more fires after the
		 * command completes, so treat it as a timeout if the command
		 * has not executed after three vblank interrupts.
		 */
		mtk_crtc->cmdq_vblank_cnt = 3;

		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
	}
#endif
	mtk_crtc->config_updating = false;
	mutex_unlock(&mtk_crtc->hw_lock);
}

static void mtk_crtc_ddp_irq(void *data)
{
	struct drm_crtc *crtc = data;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv = crtc->dev->dev_private;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
		mtk_crtc_ddp_config(crtc, NULL);
	else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
		DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
			  drm_crtc_index(&mtk_crtc->base));
#else
	if (!priv->data->shadow_register)
		mtk_crtc_ddp_config(crtc, NULL);
#endif
	mtk_drm_finish_page_flip(mtk_crtc);
}

static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_enable_vblank(comp, mtk_crtc_ddp_irq, &mtk_crtc->base);

	return 0;
}

static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_disable_vblank(comp);
}

int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
			     struct mtk_plane_state *state)
{
	unsigned int local_layer;
	struct mtk_ddp_comp *comp;

	comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
	if (comp)
		return mtk_ddp_comp_layer_check(comp, local_layer, state);
	return 0;
}

void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
			       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (!mtk_crtc->enabled)
		return;

	mtk_drm_crtc_update_config(mtk_crtc, false);
}

static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);

	ret = pm_runtime_resume_and_get(comp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret);
		return;
	}

	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
	if (ret) {
		pm_runtime_put(comp->dev);
		return;
	}

	drm_crtc_vblank_on(crtc);
	mtk_crtc->enabled = true;
}

static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int i, ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
	if (!mtk_crtc->enabled)
		return;

	/* Set all pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		plane_state->pending.enable = false;
		plane_state->pending.config = true;
	}
	mtk_crtc->pending_planes = true;

	mtk_drm_crtc_update_config(mtk_crtc, false);
	/* Wait for planes to be disabled */
	drm_crtc_wait_one_vblank(crtc);

	drm_crtc_vblank_off(crtc);
	mtk_crtc_ddp_hw_fini(mtk_crtc);
	ret = pm_runtime_put(comp->dev);
	if (ret < 0)
		DRM_DEV_ERROR(comp->dev, "Failed to disable power domain: %d\n", ret);

	mtk_crtc->enabled = false;
}

static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (mtk_crtc->event && mtk_crtc_state->base.event)
		DRM_ERROR("new event while there is still a pending event\n");

	if (mtk_crtc_state->base.event) {
		mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		mtk_crtc->event = mtk_crtc_state->base.event;
		mtk_crtc_state->base.event = NULL;
	}
}

static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	if (crtc->state->color_mgmt_changed)
		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
			mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
		}
	mtk_drm_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
}

static const struct drm_crtc_funcs mtk_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= mtk_drm_crtc_destroy,
	.reset			= mtk_drm_crtc_reset,
	.atomic_duplicate_state	= mtk_drm_crtc_duplicate_state,
	.atomic_destroy_state	= mtk_drm_crtc_destroy_state,
	.enable_vblank		= mtk_drm_crtc_enable_vblank,
	.disable_vblank		= mtk_drm_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
	.mode_fixup	= mtk_drm_crtc_mode_fixup,
	.mode_set_nofb	= mtk_drm_crtc_mode_set_nofb,
	.atomic_begin	= mtk_drm_crtc_atomic_begin,
	.atomic_flush	= mtk_drm_crtc_atomic_flush,
	.atomic_enable	= mtk_drm_crtc_atomic_enable,
	.atomic_disable	= mtk_drm_crtc_atomic_disable,
};

static int mtk_drm_crtc_init(struct drm_device *drm,
			     struct mtk_drm_crtc *mtk_crtc,
			     unsigned int pipe)
{
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
			primary = &mtk_crtc->planes[i];
		else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
			cursor = &mtk_crtc->planes[i];
	}

	ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
					&mtk_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_crtc;

	drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(&mtk_crtc->base);
	return ret;
}
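
/*
 * Only the first two components of a path may contribute planes: the first
 * one (an OVL) always does, while the second one does only if it can take
 * the first one's output as its background color input (bgclr_in_on), e.g.
 * a second, cascaded OVL.
 */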
static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
					int comp_idx)
{
	struct mtk_ddp_comp *comp;

	if (comp_idx > 1)
		return 0;

	comp = mtk_crtc->ddp_comp[comp_idx];
	if (!comp->funcs)
		return 0;

	if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
		return 0;

	return mtk_ddp_comp_layer_nr(comp);
}

static inline
enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
					    unsigned int num_planes)
{
	if (plane_idx == 0)
		return DRM_PLANE_TYPE_PRIMARY;
	else if (plane_idx == (num_planes - 1))
		return DRM_PLANE_TYPE_CURSOR;
	else
		return DRM_PLANE_TYPE_OVERLAY;
}

static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
					 struct mtk_drm_crtc *mtk_crtc,
					 int comp_idx, int pipe)
{
	int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		ret = mtk_plane_init(drm_dev,
				     &mtk_crtc->planes[mtk_crtc->layer_nr],
				     BIT(pipe),
				     mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
							     num_planes),
				     mtk_ddp_comp_supported_rotations(comp));
		if (ret)
			return ret;

		mtk_crtc->layer_nr++;
	}
	return 0;
}

int mtk_drm_crtc_create(struct drm_device *drm_dev,
			const enum mtk_ddp_comp_id *path, unsigned int path_len)
{
	struct mtk_drm_private *priv = drm_dev->dev_private;
	struct device *dev = drm_dev->dev;
	struct mtk_drm_crtc *mtk_crtc;
	unsigned int num_comp_planes = 0;
	int pipe = priv->num_pipes;
	int ret;
	int i;
	bool has_ctm = false;
	unsigned int gamma_lut_size = 0;

	if (!path)
		return 0;

	for (i = 0; i < path_len; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct device_node *node;
		struct mtk_ddp_comp *comp;

		node = priv->comp_node[comp_id];
		comp = &priv->ddp_comp[comp_id];

		if (!node) {
			dev_info(dev,
				 "Not creating crtc %d because component %d is disabled or missing\n",
				 pipe, comp_id);
			return 0;
		}

		if (!comp->dev) {
			dev_err(dev, "Component %pOF not initialized\n", node);
			return -ENODEV;
		}
	}

	mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
	if (!mtk_crtc)
		return -ENOMEM;

	mtk_crtc->mmsys_dev = priv->mmsys_dev;
	mtk_crtc->ddp_comp_nr = path_len;
	mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
						sizeof(*mtk_crtc->ddp_comp),
						GFP_KERNEL);
	if (!mtk_crtc->ddp_comp)
		return -ENOMEM;

	mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
	if (IS_ERR(mtk_crtc->mutex)) {
		ret = PTR_ERR(mtk_crtc->mutex);
		dev_err(dev, "Failed to get mutex: %d\n", ret);
		return ret;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct mtk_ddp_comp *comp;

		comp = &priv->ddp_comp[comp_id];
		mtk_crtc->ddp_comp[i] = comp;

		if (comp->funcs) {
			if (comp->funcs->gamma_set)
				gamma_lut_size = MTK_LUT_SIZE;

			if (comp->funcs->ctm_set)
				has_ctm = true;
		}
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);

	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
					sizeof(struct drm_plane), GFP_KERNEL);
	if (!mtk_crtc->planes)
		return -ENOMEM;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
						    pipe);
		if (ret)
			return ret;
	}

	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
	if (ret < 0)
		return ret;

	if (gamma_lut_size)
		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
	priv->num_pipes++;
	mutex_init(&mtk_crtc->hw_lock);
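
	/*
	 * CMDQ support is optional: if the mailbox channel, the
	 * "mediatek,gce-events" property or the command packet cannot be set
	 * up, cmdq_client.chan is left NULL and the driver falls back to
	 * writing registers with the CPU.
	 */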
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
	mtk_crtc->cmdq_client.client.tx_block = false;
	mtk_crtc->cmdq_client.client.knows_txdone = true;
	mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
	mtk_crtc->cmdq_client.chan =
			mbox_request_channel(&mtk_crtc->cmdq_client.client,
					     drm_crtc_index(&mtk_crtc->base));
	if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
			drm_crtc_index(&mtk_crtc->base));
		mtk_crtc->cmdq_client.chan = NULL;
	}

	if (mtk_crtc->cmdq_client.chan) {
		ret = of_property_read_u32_index(priv->mutex_node,
						 "mediatek,gce-events",
						 drm_crtc_index(&mtk_crtc->base),
						 &mtk_crtc->cmdq_event);
		if (ret) {
			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
				drm_crtc_index(&mtk_crtc->base));
			mbox_free_channel(mtk_crtc->cmdq_client.chan);
			mtk_crtc->cmdq_client.chan = NULL;
		} else {
			ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
						      &mtk_crtc->cmdq_handle,
						      PAGE_SIZE);
			if (ret) {
				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
					drm_crtc_index(&mtk_crtc->base));
				mbox_free_channel(mtk_crtc->cmdq_client.chan);
				mtk_crtc->cmdq_client.chan = NULL;
			}
		}
	}
#endif
	return 0;
}