// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/interconnect.h>
#include <linux/of_irq.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"

static const char *iommu_ports[] = {
	"mdp_0",
};

static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	pm_runtime_get_sync(dev);

	/* Magic unknown register writes:
	 *
	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
	 *    W MDP5:0x4b0 0xccccc0c0   (mdss_mdp.c:839)
	 *    W MDP5:0x4b8 0xccccc000   (mdss_mdp.c:839)
	 *
	 * Downstream fbdev driver gets these register offsets/values
	 * from DT.. not really sure what these registers are, or
	 * whether different boards/SoCs need different values.  I
	 * guess they are the golden registers.
	 *
	 * Not setting these does not seem to cause any problem.  But
	 * we may be getting lucky with the bootloader initializing
	 * them for us.  OTOH, if we can always count on the bootloader
	 * setting the golden registers, then perhaps we don't need to
	 * care.
	 */

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);

	pm_runtime_put_sync(dev);

	return 0;
}

/* Global/shared object state funcs */
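
/*
 * State that is shared across CRTCs (most notably the SMP block
 * allocations) lives in a single drm_private_obj.  It follows the usual
 * atomic pattern: duplicated into each drm_atomic_state that touches it,
 * validated there, and swapped in on commit, rather than being mutated
 * in place from the commit paths.
 */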

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct mdp5_global_state *
mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
{
	return to_mdp5_global_state(mdp5_kms->glob_state.state);
}

/*
 * This acquires the modeset lock set aside for global state, creates
 * a new duplicated private object state.
 */
struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_mdp5_global_state(priv_state);
}

static struct drm_private_state *
mdp5_global_duplicate_state(struct drm_private_obj *obj)
{
	struct mdp5_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void mdp5_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);

	kfree(mdp5_state);
}

static const struct drm_private_state_funcs mdp5_global_state_funcs = {
	.atomic_duplicate_state = mdp5_global_duplicate_state,
	.atomic_destroy_state = mdp5_global_destroy_state,
};

static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
{
	struct mdp5_global_state *state;

	drm_modeset_lock_init(&mdp5_kms->glob_state_lock);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->mdp5_kms = mdp5_kms;

	drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
				    &state->base,
				    &mdp5_global_state_funcs);
	return 0;
}

static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	struct mdp5_global_state *global_state;

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	pm_runtime_get_sync(dev);

	if (mdp5_kms->smp)
		mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}

static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	struct mdp5_global_state *global_state;

	drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state);

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	if (mdp5_kms->smp)
		mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);

	pm_runtime_put_sync(dev);
}

static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
					   struct drm_crtc *crtc)
{
	mdp5_crtc_wait_for_commit_done(crtc);
}

static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
			      struct drm_encoder *encoder)
{
	return rate;
}
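
/*
 * Split display (e.g. a single panel driven by two DSI interfaces in
 * tandem) needs the master and slave encoders wired together; command
 * mode and video mode interfaces program this differently, hence the
 * two paths below.
 */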
static int mdp5_set_split_display(struct msm_kms *kms,
				  struct drm_encoder *encoder,
				  struct drm_encoder *slave_encoder,
				  bool is_cmd_mode)
{
	if (is_cmd_mode)
		return mdp5_cmd_encoder_set_split_display(encoder,
							  slave_encoder);
	else
		return mdp5_vid_encoder_set_split_display(encoder,
							  slave_encoder);
}

static void mdp5_set_encoder_mode(struct msm_kms *kms,
				  struct drm_encoder *encoder,
				  bool cmd_mode)
{
	mdp5_encoder_set_intf_mode(encoder, cmd_mode);
}

static void mdp5_kms_destroy(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct msm_gem_address_space *aspace = kms->aspace;
	int i;

	for (i = 0; i < mdp5_kms->num_hwmixers; i++)
		mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);

	for (i = 0; i < mdp5_kms->num_hwpipes; i++)
		mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);

	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu,
				iommu_ports, ARRAY_SIZE(iommu_ports));
		msm_gem_address_space_put(aspace);
	}
}

#ifdef CONFIG_DEBUG_FS
static int smp_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct drm_printer p = drm_seq_file_printer(m);

	if (!mdp5_kms->smp) {
		drm_printf(&p, "no SMP pool\n");
		return 0;
	}

	mdp5_smp_dump(mdp5_kms->smp, &p);

	return 0;
}

static struct drm_info_list mdp5_debugfs_list[] = {
	{ "smp", smp_show },
};

static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(mdp5_debugfs_list,
				       ARRAY_SIZE(mdp5_debugfs_list),
				       minor->debugfs_root, minor);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
		return ret;
	}

	return 0;
}
#endif

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init = mdp5_hw_init,
		.irq_preinstall = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall = mdp5_irq_uninstall,
		.irq = mdp5_irq,
		.enable_vblank = mdp5_enable_vblank,
		.disable_vblank = mdp5_disable_vblank,
		.prepare_commit = mdp5_prepare_commit,
		.complete_commit = mdp5_complete_commit,
		.wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
		.get_format = mdp_get_format,
		.round_pixclk = mdp5_round_pixclk,
		.set_split_display = mdp5_set_split_display,
		.set_encoder_mode = mdp5_set_encoder_mode,
		.destroy = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS
		.debugfs_init = mdp5_kms_debugfs_init,
#endif
	},
	.set_irqmask = mdp5_set_irqmask,
};
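
/*
 * Clock bracketing for hardware access.  These are wired to the runtime
 * PM hooks at the bottom of this file, so any pm_runtime_get_sync() on
 * the mdp5 device lands here; enable_count is a sanity counter, and the
 * WARN_ON below catches unbalanced enable/disable pairs.
 */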
int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count--;
	WARN_ON(mdp5_kms->enable_count < 0);

	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	if (mdp5_kms->lut_clk)
		clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}

int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count++;

	clk_prepare_enable(mdp5_kms->ahb_clk);
	clk_prepare_enable(mdp5_kms->axi_clk);
	clk_prepare_enable(mdp5_kms->core_clk);
	if (mdp5_kms->lut_clk)
		clk_prepare_enable(mdp5_kms->lut_clk);

	return 0;
}

static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
					     struct mdp5_interface *intf,
					     struct mdp5_ctl *ctl)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;

	encoder = mdp5_encoder_init(dev, intf, ctl);
	if (IS_ERR(encoder)) {
		DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
		return encoder;
	}

	priv->encoders[priv->num_encoders++] = encoder;

	return encoder;
}

/*
 * Map a hw interface number to the index of the DSI host it is wired to,
 * i.e. the position among the INTF_DSI entries of the connect table:
 */
static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
{
	const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
	const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
	int id = 0, i;

	for (i = 0; i < intf_cnt; i++) {
		if (intfs[i] == INTF_DSI) {
			if (intf_num == i)
				return id;

			id++;
		}
	}

	return -EINVAL;
}

static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
			     struct mdp5_interface *intf)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
	struct mdp5_ctl *ctl;
	struct drm_encoder *encoder;
	int ret = 0;

	switch (intf->type) {
	case INTF_eDP:
		if (!priv->edp)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_edp_modeset_init(priv->edp, dev, encoder);
		break;
	case INTF_HDMI:
		if (!priv->hdmi)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
		break;
	case INTF_DSI:
	{
		const struct mdp5_cfg_hw *hw_cfg =
			mdp5_cfg_get_hw_config(mdp5_kms->cfg);
		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);

		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
			DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
				      intf->num);
			ret = -EINVAL;
			break;
		}

		if (!priv->dsi[dsi_id])
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		break;
	}
	default:
		DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
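
/*
 * Construct all the modeset objects: one encoder per (active) external
 * interface, one plane per hw pipe, and as many CRTCs as can be backed
 * by both an encoder and a layer mixer.
 */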
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	const struct mdp5_cfg_hw *hw_cfg;
	unsigned int num_crtcs;
	int i, ret, pi = 0, ci = 0;
	struct drm_plane *primary[MAX_BASES] = { NULL };
	struct drm_plane *cursor[MAX_BASES] = { NULL };

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/*
	 * Construct encoders and do connector modeset init for each
	 * external display interface:
	 */
	for (i = 0; i < mdp5_kms->num_intfs; i++) {
		ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
		if (ret)
			goto fail;
	}

	/*
	 * We should ideally have fewer encoders (set up by parsing the
	 * MDP5 interfaces) than layer mixers present in HW, but let's
	 * be safe here anyway:
	 */
	num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);

	/*
	 * Construct planes equaling the number of hw pipes, and CRTCs for
	 * the N encoders set up by the driver.  The first N planes become
	 * primary planes for the CRTCs, with the remainder as overlay
	 * planes:
	 */
	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		struct drm_plane *plane;
		enum drm_plane_type type;

		if (i < num_crtcs)
			type = DRM_PLANE_TYPE_PRIMARY;
		else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
			type = DRM_PLANE_TYPE_CURSOR;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		plane = mdp5_plane_init(dev, type);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (type == DRM_PLANE_TYPE_PRIMARY)
			primary[pi++] = plane;
		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor[ci++] = plane;
	}

	for (i = 0; i < num_crtcs; i++) {
		struct drm_crtc *crtc;

		crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/*
	 * Now that we know the number of crtcs we've created, set the
	 * possible crtcs for the encoders:
	 */
	for (i = 0; i < priv->num_encoders; i++) {
		struct drm_encoder *encoder = priv->encoders[i];

		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
	}

	return 0;

fail:
	return ret;
}

static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
				 u32 *major, u32 *minor)
{
	struct device *dev = &mdp5_kms->pdev->dev;
	u32 version;

	pm_runtime_get_sync(dev);
	version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
	pm_runtime_put_sync(dev);

	*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
	*minor = FIELD(version, MDP5_HW_VERSION_MINOR);

	DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
}

/*
 * Get a named clock, optionally treating a missing clock as fatal.  For
 * optional clocks, *clkp is simply left untouched (NULL) when the clock
 * is not present:
 */
static int get_clk(struct platform_device *pdev, struct clk **clkp,
		   const char *name, bool mandatory)
{
	struct device *dev = &pdev->dev;
	struct clk *clk = msm_clk_get(pdev, name);

	if (IS_ERR(clk) && mandatory) {
		DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
		return PTR_ERR(clk);
	}
	if (IS_ERR(clk))
		DBG("skipping %s", name);
	else
		*clkp = clk;

	return 0;
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}
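
/*
 * Scanout position is derived from the encoder's hardware line counter.
 * The counter runs from 1 (start of the VSYNC pulse) to VTOTAL (end of
 * the front porch), so the active region is offset by vsync-width plus
 * back-porch; positions inside the blanking intervals are reported as
 * negative vpos values, which is what the DRM vblank timestamping
 * helpers expect.
 */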
static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
				bool in_vblank_irq, int *vpos, int *hpos,
				ktime_t *stime, ktime_t *etime,
				const struct drm_display_mode *mode)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	crtc = priv->crtcs[pipe];
	if (!crtc) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return false;
	}

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * The line counter is 1 at the start of the VSYNC pulse and VTOTAL
	 * at the end of VFP.  Translate the porch values relative to the
	 * line counter positions.
	 */

	vactive_start = vsw + vbp + 1;

	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = mdp5_encoder_get_linecount(encoder);

	if (line < vactive_start) {
		line -= vactive_start;
	} else if (line > vactive_end) {
		line = line - vfp_end - vactive_start;
	} else {
		line -= vactive_start;
	}

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;

	if (pipe >= priv->num_crtcs)
		return 0;

	crtc = priv->crtcs[pipe];
	if (!crtc)
		return 0;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder)
		return 0;

	return mdp5_encoder_get_framecount(encoder);
}
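
/*
 * Second-stage init, called once the drm_device exists: hook up the
 * IRQ, quiesce any interfaces the bootloader may have left running,
 * attach the IOMMU, and build the modeset objects.  Returns the msm_kms
 * handle that the core msm driver operates on.
 */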
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	struct msm_kms *kms;
	struct msm_gem_address_space *aspace;
	int irq, i, ret;

	/* priv->kms would have been populated by the MDP5 driver */
	kms = priv->kms;
	if (!kms)
		return NULL;

	mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));

	mdp_kms_init(&mdp5_kms->base, &kms_funcs);

	pdev = mdp5_kms->pdev;

	/* irq_of_parse_and_map() returns 0 on failure, not a negative errno */
	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq) {
		ret = -EINVAL;
		DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	kms->irq = irq;

	config = mdp5_cfg_get_config(mdp5_kms->cfg);

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	pm_runtime_get_sync(&pdev->dev);
	for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
		if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
		    !config->hw->intf.base[i])
			continue;
		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);

		mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
	}
	mdelay(16);

	if (config->platform.iommu) {
		aspace = msm_gem_address_space_create(&pdev->dev,
				config->platform.iommu, "mdp5");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			goto fail;
		}

		kms->aspace = aspace;

		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret) {
			DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
				      ret);
			goto fail;
		}
	} else {
		DRM_DEV_INFO(&pdev->dev,
			     "no iommu, fallback to phys contig buffers for scanout\n");
		aspace = NULL;
	}

	pm_runtime_put_sync(&pdev->dev);

	ret = modeset_init(mdp5_kms);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 0xffff;
	dev->mode_config.max_height = 0xffff;

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = mdp5_get_scanoutpos;
	dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
	dev->max_vblank_count = 0xffffffff;
	dev->vblank_disable_immediate = true;

	return kms;
fail:
	if (kms)
		mdp5_kms_destroy(kms);
	return ERR_PTR(ret);
}

static void mdp5_destroy(struct platform_device *pdev)
{
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
	int i;

	if (mdp5_kms->ctlm)
		mdp5_ctlm_destroy(mdp5_kms->ctlm);
	if (mdp5_kms->smp)
		mdp5_smp_destroy(mdp5_kms->smp);
	if (mdp5_kms->cfg)
		mdp5_cfg_destroy(mdp5_kms->cfg);

	for (i = 0; i < mdp5_kms->num_intfs; i++)
		kfree(mdp5_kms->intfs[i]);

	if (mdp5_kms->rpm_enabled)
		pm_runtime_disable(&pdev->dev);

	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
	drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
}

static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
			   const enum mdp5_pipe *pipes, const uint32_t *offsets,
			   uint32_t caps)
{
	struct drm_device *dev = mdp5_kms->dev;
	int i, ret;

	for (i = 0; i < cnt; i++) {
		struct mdp5_hw_pipe *hwpipe;

		hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
		if (IS_ERR(hwpipe)) {
			ret = PTR_ERR(hwpipe);
			DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
				      pipe2name(pipes[i]), ret);
			return ret;
		}
		hwpipe->idx = mdp5_kms->num_hwpipes;
		mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
	}

	return 0;
}

static int hwpipe_init(struct mdp5_kms *mdp5_kms)
{
	static const enum mdp5_pipe rgb_planes[] = {
		SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
	};
	static const enum mdp5_pipe vig_planes[] = {
		SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
	};
	static const enum mdp5_pipe dma_planes[] = {
		SSPP_DMA0, SSPP_DMA1,
	};
	static const enum mdp5_pipe cursor_planes[] = {
		SSPP_CURSOR0, SSPP_CURSOR1,
	};
	const struct mdp5_cfg_hw *hw_cfg;
	int ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/* Construct RGB pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
			      hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
	if (ret)
		return ret;

	/* Construct video (VIG) pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
			      hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
	if (ret)
		return ret;

	/* Construct DMA pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
			      hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
	if (ret)
		return ret;

	/* Construct cursor pipes: */
	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
			      cursor_planes, hw_cfg->pipe_cursor.base,
			      hw_cfg->pipe_cursor.caps);
	if (ret)
		return ret;

	return 0;
}

static int hwmixer_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	int i, ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	for (i = 0; i < hw_cfg->lm.count; i++) {
		struct mdp5_hw_mixer *mixer;

		mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
		if (IS_ERR(mixer)) {
			ret = PTR_ERR(mixer);
			DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
				      i, ret);
			return ret;
		}

		mixer->idx = mdp5_kms->num_hwmixers;
		mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
	}

	return 0;
}
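
/*
 * Build the list of physical interfaces (HDMI, DSI, eDP, ...) from the
 * per-SoC hw config; entries marked INTF_DISABLED in the connect table
 * are simply skipped.
 */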
static int interface_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	const enum mdp5_intf_type *intf_types;
	int i;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	intf_types = hw_cfg->intf.connect;

	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
		struct mdp5_interface *intf;

		if (intf_types[i] == INTF_DISABLED)
			continue;

		intf = kzalloc(sizeof(*intf), GFP_KERNEL);
		if (!intf) {
			DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
			return -ENOMEM;
		}

		intf->num = i;
		intf->type = intf_types[i];
		intf->mode = MDP5_INTF_MODE_NONE;
		intf->idx = mdp5_kms->num_intfs;
		mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
	}

	return 0;
}
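
/*
 * First-stage init, run at component bind time: map registers, acquire
 * clocks, identify the hw revision, and construct the SMP/CTL/pipe/
 * mixer/interface objects.  The result is parked in priv->kms for
 * mdp5_kms_init() to finish the job once the rest of the msm driver
 * is up.
 */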
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	u32 major, minor;
	int ret;

	mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
	if (!mdp5_kms) {
		ret = -ENOMEM;
		goto fail;
	}

	platform_set_drvdata(pdev, mdp5_kms);

	spin_lock_init(&mdp5_kms->resource_lock);

	mdp5_kms->dev = dev;
	mdp5_kms->pdev = pdev;

	ret = mdp5_global_obj_init(mdp5_kms);
	if (ret)
		goto fail;

	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
	if (IS_ERR(mdp5_kms->mmio)) {
		ret = PTR_ERR(mdp5_kms->mmio);
		goto fail;
	}

	/* mandatory clocks: */
	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
	if (ret)
		goto fail;

	/* optional clocks: */
	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);

	/* we need to set a default rate before enabling.  Set a safe
	 * rate first, then figure out hw revision, and then set a
	 * more optimal rate:
	 */
	clk_set_rate(mdp5_kms->core_clk, 200000000);

	pm_runtime_enable(&pdev->dev);
	mdp5_kms->rpm_enabled = true;

	read_mdp_hw_revision(mdp5_kms, &major, &minor);

	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(mdp5_kms->cfg)) {
		ret = PTR_ERR(mdp5_kms->cfg);
		mdp5_kms->cfg = NULL;
		goto fail;
	}

	config = mdp5_cfg_get_config(mdp5_kms->cfg);
	mdp5_kms->caps = config->hw->mdp.caps;

	/* TODO: compute core clock rate at runtime */
	clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);

	/*
	 * Some chipsets have a Shared Memory Pool (SMP), while others
	 * have dedicated latency buffering per source pipe instead;
	 * this section initializes the SMP:
	 */
	if (mdp5_kms->caps & MDP_CAP_SMP) {
		mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
		if (IS_ERR(mdp5_kms->smp)) {
			ret = PTR_ERR(mdp5_kms->smp);
			mdp5_kms->smp = NULL;
			goto fail;
		}
	}

	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
	if (IS_ERR(mdp5_kms->ctlm)) {
		ret = PTR_ERR(mdp5_kms->ctlm);
		mdp5_kms->ctlm = NULL;
		goto fail;
	}

	ret = hwpipe_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = hwmixer_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = interface_init(mdp5_kms);
	if (ret)
		goto fail;

	/* set uninit-ed kms */
	priv->kms = &mdp5_kms->base.base;

	return 0;
fail:
	/* mdp5_destroy() dereferences the drvdata, so skip it if the
	 * allocation itself failed:
	 */
	if (mdp5_kms)
		mdp5_destroy(pdev);
	return ret;
}

static int mdp5_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *ddev = dev_get_drvdata(master);
	struct platform_device *pdev = to_platform_device(dev);

	DBG("");

	return mdp5_init(pdev, ddev);
}

static void mdp5_unbind(struct device *dev, struct device *master,
			void *data)
{
	struct platform_device *pdev = to_platform_device(dev);

	mdp5_destroy(pdev);
}

static const struct component_ops mdp5_ops = {
	.bind   = mdp5_bind,
	.unbind = mdp5_unbind,
};
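
/*
 * Vote for interconnect (bus) bandwidth up front.  The 6.4 GB/s figure
 * below appears to be a conservative fixed request rather than a
 * per-mode computed bandwidth.  Only the mdp0-mem path is required; the
 * mdp1-mem and rotator-mem paths are best-effort.
 */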
static int mdp5_setup_interconnect(struct platform_device *pdev)
{
	struct icc_path *path0 = of_icc_get(&pdev->dev, "mdp0-mem");
	struct icc_path *path1 = of_icc_get(&pdev->dev, "mdp1-mem");
	struct icc_path *path_rot = of_icc_get(&pdev->dev, "rotator-mem");

	if (IS_ERR(path0))
		return PTR_ERR(path0);

	if (!path0) {
		/* No interconnect support is not necessarily fatal: the
		 * platform may simply not have an interconnect driver
		 * yet.  But warn about it in case the bootloader didn't
		 * set up bus clocks high enough for scanout.
		 */
		dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n");
		return 0;
	}

	icc_set_bw(path0, 0, MBps_to_icc(6400));

	if (!IS_ERR_OR_NULL(path1))
		icc_set_bw(path1, 0, MBps_to_icc(6400));
	if (!IS_ERR_OR_NULL(path_rot))
		icc_set_bw(path_rot, 0, MBps_to_icc(6400));

	return 0;
}

static int mdp5_dev_probe(struct platform_device *pdev)
{
	int ret;

	DBG("");

	ret = mdp5_setup_interconnect(pdev);
	if (ret)
		return ret;

	return component_add(&pdev->dev, &mdp5_ops);
}

static int mdp5_dev_remove(struct platform_device *pdev)
{
	DBG("");
	component_del(&pdev->dev, &mdp5_ops);
	return 0;
}

static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);

	DBG("");

	return mdp5_disable(mdp5_kms);
}

static __maybe_unused int mdp5_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);

	DBG("");

	return mdp5_enable(mdp5_kms);
}

static const struct dev_pm_ops mdp5_pm_ops = {
	SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
};

static const struct of_device_id mdp5_dt_match[] = {
	{ .compatible = "qcom,mdp5", },
	/* to support downstream DT files */
	{ .compatible = "qcom,mdss_mdp", },
	{}
};
MODULE_DEVICE_TABLE(of, mdp5_dt_match);

static struct platform_driver mdp5_driver = {
	.probe = mdp5_dev_probe,
	.remove = mdp5_dev_remove,
	.driver = {
		.name = "msm_mdp",
		.of_match_table = mdp5_dt_match,
		.pm = &mdp5_pm_ops,
	},
};

void __init msm_mdp_register(void)
{
	DBG("");
	platform_driver_register(&mdp5_driver);
}

void __exit msm_mdp_unregister(void)
{
	DBG("");
	platform_driver_unregister(&mdp5_driver);
}