1 /* 2 * Copyright (c) 2014, The Linux Foundation. All rights reserved. 3 * Copyright (C) 2013 Red Hat 4 * Author: Rob Clark <robdclark@gmail.com> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published by 8 * the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 * You should have received a copy of the GNU General Public License along with 16 * this program. If not, see <http://www.gnu.org/licenses/>. 17 */ 18 19 #include <linux/of_irq.h> 20 21 #include "msm_drv.h" 22 #include "msm_gem.h" 23 #include "msm_mmu.h" 24 #include "mdp5_kms.h" 25 26 static const char *iommu_ports[] = { 27 "mdp_0", 28 }; 29 30 static int mdp5_hw_init(struct msm_kms *kms) 31 { 32 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 33 struct device *dev = &mdp5_kms->pdev->dev; 34 unsigned long flags; 35 36 pm_runtime_get_sync(dev); 37 38 /* Magic unknown register writes: 39 * 40 * W VBIF:0x004 00000001 (mdss_mdp.c:839) 41 * W MDP5:0x2e0 0xe9 (mdss_mdp.c:839) 42 * W MDP5:0x2e4 0x55 (mdss_mdp.c:839) 43 * W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839) 44 * W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839) 45 * W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839) 46 * W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839) 47 * W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839) 48 * W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839) 49 * 50 * Downstream fbdev driver gets these register offsets/values 51 * from DT.. not really sure what these registers are or if 52 * different values for different boards/SoC's, etc. I guess 53 * they are the golden registers. 54 * 55 * Not setting these does not seem to cause any problem. But 56 * we may be getting lucky with the bootloader initializing 57 * them for us. OTOH, if we can always count on the bootloader 58 * setting the golden registers, then perhaps we don't need to 59 * care. 60 */ 61 62 spin_lock_irqsave(&mdp5_kms->resource_lock, flags); 63 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); 64 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); 65 66 mdp5_ctlm_hw_reset(mdp5_kms->ctlm); 67 68 pm_runtime_put_sync(dev); 69 70 return 0; 71 } 72 73 /* Global/shared object state funcs */ 74 75 /* 76 * This is a helper that returns the private state currently in operation. 77 * Note that this would return the "old_state" if called in the atomic check 78 * path, and the "new_state" after the atomic swap has been done. 79 */ 80 struct mdp5_global_state * 81 mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms) 82 { 83 return to_mdp5_global_state(mdp5_kms->glob_state.state); 84 } 85 86 /* 87 * This acquires the modeset lock set aside for global state, creates 88 * a new duplicated private object state. 89 */ 90 struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s) 91 { 92 struct msm_drm_private *priv = s->dev->dev_private; 93 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); 94 struct drm_private_state *priv_state; 95 int ret; 96 97 ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx); 98 if (ret) 99 return ERR_PTR(ret); 100 101 priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state); 102 if (IS_ERR(priv_state)) 103 return ERR_CAST(priv_state); 104 105 return to_mdp5_global_state(priv_state); 106 } 107 108 static struct drm_private_state * 109 mdp5_global_duplicate_state(struct drm_private_obj *obj) 110 { 111 struct mdp5_global_state *state; 112 113 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); 114 if (!state) 115 return NULL; 116 117 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); 118 119 return &state->base; 120 } 121 122 static void mdp5_global_destroy_state(struct drm_private_obj *obj, 123 struct drm_private_state *state) 124 { 125 struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state); 126 127 kfree(mdp5_state); 128 } 129 130 static const struct drm_private_state_funcs mdp5_global_state_funcs = { 131 .atomic_duplicate_state = mdp5_global_duplicate_state, 132 .atomic_destroy_state = mdp5_global_destroy_state, 133 }; 134 135 static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms) 136 { 137 struct mdp5_global_state *state; 138 139 drm_modeset_lock_init(&mdp5_kms->glob_state_lock); 140 141 state = kzalloc(sizeof(*state), GFP_KERNEL); 142 if (!state) 143 return -ENOMEM; 144 145 state->mdp5_kms = mdp5_kms; 146 147 drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state, 148 &state->base, 149 &mdp5_global_state_funcs); 150 return 0; 151 } 152 153 static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) 154 { 155 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 156 struct device *dev = &mdp5_kms->pdev->dev; 157 struct mdp5_global_state *global_state; 158 159 global_state = mdp5_get_existing_global_state(mdp5_kms); 160 161 pm_runtime_get_sync(dev); 162 163 if (mdp5_kms->smp) 164 mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp); 165 } 166 167 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 168 { 169 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 170 struct device *dev = &mdp5_kms->pdev->dev; 171 struct mdp5_global_state *global_state; 172 173 drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state); 174 175 global_state = mdp5_get_existing_global_state(mdp5_kms); 176 177 if (mdp5_kms->smp) 178 mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp); 179 180 pm_runtime_put_sync(dev); 181 } 182 183 static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms, 184 struct drm_crtc *crtc) 185 { 186 mdp5_crtc_wait_for_commit_done(crtc); 187 } 188 189 static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate, 190 struct drm_encoder *encoder) 191 { 192 return rate; 193 } 194 195 static int mdp5_set_split_display(struct msm_kms *kms, 196 struct drm_encoder *encoder, 197 struct drm_encoder *slave_encoder, 198 bool is_cmd_mode) 199 { 200 if (is_cmd_mode) 201 return mdp5_cmd_encoder_set_split_display(encoder, 202 slave_encoder); 203 else 204 return mdp5_vid_encoder_set_split_display(encoder, 205 slave_encoder); 206 } 207 208 static void mdp5_set_encoder_mode(struct msm_kms *kms, 209 struct drm_encoder *encoder, 210 bool cmd_mode) 211 { 212 mdp5_encoder_set_intf_mode(encoder, cmd_mode); 213 } 214 215 static void mdp5_kms_destroy(struct msm_kms *kms) 216 { 217 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 218 struct msm_gem_address_space *aspace = kms->aspace; 219 int i; 220 221 for (i = 0; i < mdp5_kms->num_hwmixers; i++) 222 mdp5_mixer_destroy(mdp5_kms->hwmixers[i]); 223 224 for (i = 0; i < mdp5_kms->num_hwpipes; i++) 225 mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); 226 227 if (aspace) { 228 aspace->mmu->funcs->detach(aspace->mmu, 229 iommu_ports, ARRAY_SIZE(iommu_ports)); 230 msm_gem_address_space_put(aspace); 231 } 232 } 233 234 #ifdef CONFIG_DEBUG_FS 235 static int smp_show(struct seq_file *m, void *arg) 236 { 237 struct drm_info_node *node = (struct drm_info_node *) m->private; 238 struct drm_device *dev = node->minor->dev; 239 struct msm_drm_private *priv = dev->dev_private; 240 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); 241 struct drm_printer p = drm_seq_file_printer(m); 242 243 if (!mdp5_kms->smp) { 244 drm_printf(&p, "no SMP pool\n"); 245 return 0; 246 } 247 248 mdp5_smp_dump(mdp5_kms->smp, &p); 249 250 return 0; 251 } 252 253 static struct drm_info_list mdp5_debugfs_list[] = { 254 {"smp", smp_show }, 255 }; 256 257 static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) 258 { 259 struct drm_device *dev = minor->dev; 260 int ret; 261 262 ret = drm_debugfs_create_files(mdp5_debugfs_list, 263 ARRAY_SIZE(mdp5_debugfs_list), 264 minor->debugfs_root, minor); 265 266 if (ret) { 267 DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n"); 268 return ret; 269 } 270 271 return 0; 272 } 273 #endif 274 275 static const struct mdp_kms_funcs kms_funcs = { 276 .base = { 277 .hw_init = mdp5_hw_init, 278 .irq_preinstall = mdp5_irq_preinstall, 279 .irq_postinstall = mdp5_irq_postinstall, 280 .irq_uninstall = mdp5_irq_uninstall, 281 .irq = mdp5_irq, 282 .enable_vblank = mdp5_enable_vblank, 283 .disable_vblank = mdp5_disable_vblank, 284 .prepare_commit = mdp5_prepare_commit, 285 .complete_commit = mdp5_complete_commit, 286 .wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done, 287 .get_format = mdp_get_format, 288 .round_pixclk = mdp5_round_pixclk, 289 .set_split_display = mdp5_set_split_display, 290 .set_encoder_mode = mdp5_set_encoder_mode, 291 .destroy = mdp5_kms_destroy, 292 #ifdef CONFIG_DEBUG_FS 293 .debugfs_init = mdp5_kms_debugfs_init, 294 #endif 295 }, 296 .set_irqmask = mdp5_set_irqmask, 297 }; 298 299 int mdp5_disable(struct mdp5_kms *mdp5_kms) 300 { 301 DBG(""); 302 303 mdp5_kms->enable_count--; 304 WARN_ON(mdp5_kms->enable_count < 0); 305 306 clk_disable_unprepare(mdp5_kms->ahb_clk); 307 clk_disable_unprepare(mdp5_kms->axi_clk); 308 clk_disable_unprepare(mdp5_kms->core_clk); 309 if (mdp5_kms->lut_clk) 310 clk_disable_unprepare(mdp5_kms->lut_clk); 311 312 return 0; 313 } 314 315 int mdp5_enable(struct mdp5_kms *mdp5_kms) 316 { 317 DBG(""); 318 319 mdp5_kms->enable_count++; 320 321 clk_prepare_enable(mdp5_kms->ahb_clk); 322 clk_prepare_enable(mdp5_kms->axi_clk); 323 clk_prepare_enable(mdp5_kms->core_clk); 324 if (mdp5_kms->lut_clk) 325 clk_prepare_enable(mdp5_kms->lut_clk); 326 327 return 0; 328 } 329 330 static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, 331 struct mdp5_interface *intf, 332 struct mdp5_ctl *ctl) 333 { 334 struct drm_device *dev = mdp5_kms->dev; 335 struct msm_drm_private *priv = dev->dev_private; 336 struct drm_encoder *encoder; 337 338 encoder = mdp5_encoder_init(dev, intf, ctl); 339 if (IS_ERR(encoder)) { 340 DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n"); 341 return encoder; 342 } 343 344 priv->encoders[priv->num_encoders++] = encoder; 345 346 return encoder; 347 } 348 349 static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num) 350 { 351 const enum mdp5_intf_type *intfs = hw_cfg->intf.connect; 352 const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect); 353 int id = 0, i; 354 355 for (i = 0; i < intf_cnt; i++) { 356 if (intfs[i] == INTF_DSI) { 357 if (intf_num == i) 358 return id; 359 360 id++; 361 } 362 } 363 364 return -EINVAL; 365 } 366 367 static int modeset_init_intf(struct mdp5_kms *mdp5_kms, 368 struct mdp5_interface *intf) 369 { 370 struct drm_device *dev = mdp5_kms->dev; 371 struct msm_drm_private *priv = dev->dev_private; 372 struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm; 373 struct mdp5_ctl *ctl; 374 struct drm_encoder *encoder; 375 int ret = 0; 376 377 switch (intf->type) { 378 case INTF_eDP: 379 if (!priv->edp) 380 break; 381 382 ctl = mdp5_ctlm_request(ctlm, intf->num); 383 if (!ctl) { 384 ret = -EINVAL; 385 break; 386 } 387 388 encoder = construct_encoder(mdp5_kms, intf, ctl); 389 if (IS_ERR(encoder)) { 390 ret = PTR_ERR(encoder); 391 break; 392 } 393 394 ret = msm_edp_modeset_init(priv->edp, dev, encoder); 395 break; 396 case INTF_HDMI: 397 if (!priv->hdmi) 398 break; 399 400 ctl = mdp5_ctlm_request(ctlm, intf->num); 401 if (!ctl) { 402 ret = -EINVAL; 403 break; 404 } 405 406 encoder = construct_encoder(mdp5_kms, intf, ctl); 407 if (IS_ERR(encoder)) { 408 ret = PTR_ERR(encoder); 409 break; 410 } 411 412 ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder); 413 break; 414 case INTF_DSI: 415 { 416 const struct mdp5_cfg_hw *hw_cfg = 417 mdp5_cfg_get_hw_config(mdp5_kms->cfg); 418 int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num); 419 420 if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { 421 DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n", 422 intf->num); 423 ret = -EINVAL; 424 break; 425 } 426 427 if (!priv->dsi[dsi_id]) 428 break; 429 430 ctl = mdp5_ctlm_request(ctlm, intf->num); 431 if (!ctl) { 432 ret = -EINVAL; 433 break; 434 } 435 436 encoder = construct_encoder(mdp5_kms, intf, ctl); 437 if (IS_ERR(encoder)) { 438 ret = PTR_ERR(encoder); 439 break; 440 } 441 442 ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); 443 break; 444 } 445 default: 446 DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type); 447 ret = -EINVAL; 448 break; 449 } 450 451 return ret; 452 } 453 454 static int modeset_init(struct mdp5_kms *mdp5_kms) 455 { 456 struct drm_device *dev = mdp5_kms->dev; 457 struct msm_drm_private *priv = dev->dev_private; 458 const struct mdp5_cfg_hw *hw_cfg; 459 unsigned int num_crtcs; 460 int i, ret, pi = 0, ci = 0; 461 struct drm_plane *primary[MAX_BASES] = { NULL }; 462 struct drm_plane *cursor[MAX_BASES] = { NULL }; 463 464 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 465 466 /* 467 * Construct encoders and modeset initialize connector devices 468 * for each external display interface. 469 */ 470 for (i = 0; i < mdp5_kms->num_intfs; i++) { 471 ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]); 472 if (ret) 473 goto fail; 474 } 475 476 /* 477 * We should ideally have less number of encoders (set up by parsing 478 * the MDP5 interfaces) than the number of layer mixers present in HW, 479 * but let's be safe here anyway 480 */ 481 num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers); 482 483 /* 484 * Construct planes equaling the number of hw pipes, and CRTCs for the 485 * N encoders set up by the driver. The first N planes become primary 486 * planes for the CRTCs, with the remainder as overlay planes: 487 */ 488 for (i = 0; i < mdp5_kms->num_hwpipes; i++) { 489 struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; 490 struct drm_plane *plane; 491 enum drm_plane_type type; 492 493 if (i < num_crtcs) 494 type = DRM_PLANE_TYPE_PRIMARY; 495 else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR) 496 type = DRM_PLANE_TYPE_CURSOR; 497 else 498 type = DRM_PLANE_TYPE_OVERLAY; 499 500 plane = mdp5_plane_init(dev, type); 501 if (IS_ERR(plane)) { 502 ret = PTR_ERR(plane); 503 DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret); 504 goto fail; 505 } 506 priv->planes[priv->num_planes++] = plane; 507 508 if (type == DRM_PLANE_TYPE_PRIMARY) 509 primary[pi++] = plane; 510 if (type == DRM_PLANE_TYPE_CURSOR) 511 cursor[ci++] = plane; 512 } 513 514 for (i = 0; i < num_crtcs; i++) { 515 struct drm_crtc *crtc; 516 517 crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i); 518 if (IS_ERR(crtc)) { 519 ret = PTR_ERR(crtc); 520 DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); 521 goto fail; 522 } 523 priv->crtcs[priv->num_crtcs++] = crtc; 524 } 525 526 /* 527 * Now that we know the number of crtcs we've created, set the possible 528 * crtcs for the encoders 529 */ 530 for (i = 0; i < priv->num_encoders; i++) { 531 struct drm_encoder *encoder = priv->encoders[i]; 532 533 encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; 534 } 535 536 return 0; 537 538 fail: 539 return ret; 540 } 541 542 static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms, 543 u32 *major, u32 *minor) 544 { 545 struct device *dev = &mdp5_kms->pdev->dev; 546 u32 version; 547 548 pm_runtime_get_sync(dev); 549 version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION); 550 pm_runtime_put_sync(dev); 551 552 *major = FIELD(version, MDP5_HW_VERSION_MAJOR); 553 *minor = FIELD(version, MDP5_HW_VERSION_MINOR); 554 555 DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor); 556 } 557 558 static int get_clk(struct platform_device *pdev, struct clk **clkp, 559 const char *name, bool mandatory) 560 { 561 struct device *dev = &pdev->dev; 562 struct clk *clk = msm_clk_get(pdev, name); 563 if (IS_ERR(clk) && mandatory) { 564 DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); 565 return PTR_ERR(clk); 566 } 567 if (IS_ERR(clk)) 568 DBG("skipping %s", name); 569 else 570 *clkp = clk; 571 572 return 0; 573 } 574 575 static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) 576 { 577 struct drm_device *dev = crtc->dev; 578 struct drm_encoder *encoder; 579 580 drm_for_each_encoder(encoder, dev) 581 if (encoder->crtc == crtc) 582 return encoder; 583 584 return NULL; 585 } 586 587 static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe, 588 bool in_vblank_irq, int *vpos, int *hpos, 589 ktime_t *stime, ktime_t *etime, 590 const struct drm_display_mode *mode) 591 { 592 struct msm_drm_private *priv = dev->dev_private; 593 struct drm_crtc *crtc; 594 struct drm_encoder *encoder; 595 int line, vsw, vbp, vactive_start, vactive_end, vfp_end; 596 597 crtc = priv->crtcs[pipe]; 598 if (!crtc) { 599 DRM_ERROR("Invalid crtc %d\n", pipe); 600 return false; 601 } 602 603 encoder = get_encoder_from_crtc(crtc); 604 if (!encoder) { 605 DRM_ERROR("no encoder found for crtc %d\n", pipe); 606 return false; 607 } 608 609 vsw = mode->crtc_vsync_end - mode->crtc_vsync_start; 610 vbp = mode->crtc_vtotal - mode->crtc_vsync_end; 611 612 /* 613 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at 614 * the end of VFP. Translate the porch values relative to the line 615 * counter positions. 616 */ 617 618 vactive_start = vsw + vbp + 1; 619 620 vactive_end = vactive_start + mode->crtc_vdisplay; 621 622 /* last scan line before VSYNC */ 623 vfp_end = mode->crtc_vtotal; 624 625 if (stime) 626 *stime = ktime_get(); 627 628 line = mdp5_encoder_get_linecount(encoder); 629 630 if (line < vactive_start) { 631 line -= vactive_start; 632 } else if (line > vactive_end) { 633 line = line - vfp_end - vactive_start; 634 } else { 635 line -= vactive_start; 636 } 637 638 *vpos = line; 639 *hpos = 0; 640 641 if (etime) 642 *etime = ktime_get(); 643 644 return true; 645 } 646 647 static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 648 { 649 struct msm_drm_private *priv = dev->dev_private; 650 struct drm_crtc *crtc; 651 struct drm_encoder *encoder; 652 653 if (pipe >= priv->num_crtcs) 654 return 0; 655 656 crtc = priv->crtcs[pipe]; 657 if (!crtc) 658 return 0; 659 660 encoder = get_encoder_from_crtc(crtc); 661 if (!encoder) 662 return 0; 663 664 return mdp5_encoder_get_framecount(encoder); 665 } 666 667 struct msm_kms *mdp5_kms_init(struct drm_device *dev) 668 { 669 struct msm_drm_private *priv = dev->dev_private; 670 struct platform_device *pdev; 671 struct mdp5_kms *mdp5_kms; 672 struct mdp5_cfg *config; 673 struct msm_kms *kms; 674 struct msm_gem_address_space *aspace; 675 int irq, i, ret; 676 677 /* priv->kms would have been populated by the MDP5 driver */ 678 kms = priv->kms; 679 if (!kms) 680 return NULL; 681 682 mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 683 684 mdp_kms_init(&mdp5_kms->base, &kms_funcs); 685 686 pdev = mdp5_kms->pdev; 687 688 irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 689 if (irq < 0) { 690 ret = irq; 691 DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret); 692 goto fail; 693 } 694 695 kms->irq = irq; 696 697 config = mdp5_cfg_get_config(mdp5_kms->cfg); 698 699 /* make sure things are off before attaching iommu (bootloader could 700 * have left things on, in which case we'll start getting faults if 701 * we don't disable): 702 */ 703 pm_runtime_get_sync(&pdev->dev); 704 for (i = 0; i < MDP5_INTF_NUM_MAX; i++) { 705 if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) || 706 !config->hw->intf.base[i]) 707 continue; 708 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); 709 710 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3); 711 } 712 mdelay(16); 713 714 if (config->platform.iommu) { 715 aspace = msm_gem_address_space_create(&pdev->dev, 716 config->platform.iommu, "mdp5"); 717 if (IS_ERR(aspace)) { 718 ret = PTR_ERR(aspace); 719 goto fail; 720 } 721 722 kms->aspace = aspace; 723 724 ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, 725 ARRAY_SIZE(iommu_ports)); 726 if (ret) { 727 DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n", 728 ret); 729 goto fail; 730 } 731 } else { 732 DRM_DEV_INFO(&pdev->dev, 733 "no iommu, fallback to phys contig buffers for scanout\n"); 734 aspace = NULL; 735 } 736 737 pm_runtime_put_sync(&pdev->dev); 738 739 ret = modeset_init(mdp5_kms); 740 if (ret) { 741 DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret); 742 goto fail; 743 } 744 745 dev->mode_config.min_width = 0; 746 dev->mode_config.min_height = 0; 747 dev->mode_config.max_width = 0xffff; 748 dev->mode_config.max_height = 0xffff; 749 750 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 751 dev->driver->get_scanout_position = mdp5_get_scanoutpos; 752 dev->driver->get_vblank_counter = mdp5_get_vblank_counter; 753 dev->max_vblank_count = 0xffffffff; 754 dev->vblank_disable_immediate = true; 755 756 return kms; 757 fail: 758 if (kms) 759 mdp5_kms_destroy(kms); 760 return ERR_PTR(ret); 761 } 762 763 static void mdp5_destroy(struct platform_device *pdev) 764 { 765 struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); 766 int i; 767 768 if (mdp5_kms->ctlm) 769 mdp5_ctlm_destroy(mdp5_kms->ctlm); 770 if (mdp5_kms->smp) 771 mdp5_smp_destroy(mdp5_kms->smp); 772 if (mdp5_kms->cfg) 773 mdp5_cfg_destroy(mdp5_kms->cfg); 774 775 for (i = 0; i < mdp5_kms->num_intfs; i++) 776 kfree(mdp5_kms->intfs[i]); 777 778 if (mdp5_kms->rpm_enabled) 779 pm_runtime_disable(&pdev->dev); 780 781 drm_atomic_private_obj_fini(&mdp5_kms->glob_state); 782 drm_modeset_lock_fini(&mdp5_kms->glob_state_lock); 783 } 784 785 static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt, 786 const enum mdp5_pipe *pipes, const uint32_t *offsets, 787 uint32_t caps) 788 { 789 struct drm_device *dev = mdp5_kms->dev; 790 int i, ret; 791 792 for (i = 0; i < cnt; i++) { 793 struct mdp5_hw_pipe *hwpipe; 794 795 hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps); 796 if (IS_ERR(hwpipe)) { 797 ret = PTR_ERR(hwpipe); 798 DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n", 799 pipe2name(pipes[i]), ret); 800 return ret; 801 } 802 hwpipe->idx = mdp5_kms->num_hwpipes; 803 mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe; 804 } 805 806 return 0; 807 } 808 809 static int hwpipe_init(struct mdp5_kms *mdp5_kms) 810 { 811 static const enum mdp5_pipe rgb_planes[] = { 812 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, 813 }; 814 static const enum mdp5_pipe vig_planes[] = { 815 SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, 816 }; 817 static const enum mdp5_pipe dma_planes[] = { 818 SSPP_DMA0, SSPP_DMA1, 819 }; 820 static const enum mdp5_pipe cursor_planes[] = { 821 SSPP_CURSOR0, SSPP_CURSOR1, 822 }; 823 const struct mdp5_cfg_hw *hw_cfg; 824 int ret; 825 826 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 827 828 /* Construct RGB pipes: */ 829 ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes, 830 hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps); 831 if (ret) 832 return ret; 833 834 /* Construct video (VIG) pipes: */ 835 ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes, 836 hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps); 837 if (ret) 838 return ret; 839 840 /* Construct DMA pipes: */ 841 ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes, 842 hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps); 843 if (ret) 844 return ret; 845 846 /* Construct cursor pipes: */ 847 ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count, 848 cursor_planes, hw_cfg->pipe_cursor.base, 849 hw_cfg->pipe_cursor.caps); 850 if (ret) 851 return ret; 852 853 return 0; 854 } 855 856 static int hwmixer_init(struct mdp5_kms *mdp5_kms) 857 { 858 struct drm_device *dev = mdp5_kms->dev; 859 const struct mdp5_cfg_hw *hw_cfg; 860 int i, ret; 861 862 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 863 864 for (i = 0; i < hw_cfg->lm.count; i++) { 865 struct mdp5_hw_mixer *mixer; 866 867 mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]); 868 if (IS_ERR(mixer)) { 869 ret = PTR_ERR(mixer); 870 DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n", 871 i, ret); 872 return ret; 873 } 874 875 mixer->idx = mdp5_kms->num_hwmixers; 876 mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer; 877 } 878 879 return 0; 880 } 881 882 static int interface_init(struct mdp5_kms *mdp5_kms) 883 { 884 struct drm_device *dev = mdp5_kms->dev; 885 const struct mdp5_cfg_hw *hw_cfg; 886 const enum mdp5_intf_type *intf_types; 887 int i; 888 889 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 890 intf_types = hw_cfg->intf.connect; 891 892 for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { 893 struct mdp5_interface *intf; 894 895 if (intf_types[i] == INTF_DISABLED) 896 continue; 897 898 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 899 if (!intf) { 900 DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i); 901 return -ENOMEM; 902 } 903 904 intf->num = i; 905 intf->type = intf_types[i]; 906 intf->mode = MDP5_INTF_MODE_NONE; 907 intf->idx = mdp5_kms->num_intfs; 908 mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf; 909 } 910 911 return 0; 912 } 913 914 static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) 915 { 916 struct msm_drm_private *priv = dev->dev_private; 917 struct mdp5_kms *mdp5_kms; 918 struct mdp5_cfg *config; 919 u32 major, minor; 920 int ret; 921 922 mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL); 923 if (!mdp5_kms) { 924 ret = -ENOMEM; 925 goto fail; 926 } 927 928 platform_set_drvdata(pdev, mdp5_kms); 929 930 spin_lock_init(&mdp5_kms->resource_lock); 931 932 mdp5_kms->dev = dev; 933 mdp5_kms->pdev = pdev; 934 935 ret = mdp5_global_obj_init(mdp5_kms); 936 if (ret) 937 goto fail; 938 939 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); 940 if (IS_ERR(mdp5_kms->mmio)) { 941 ret = PTR_ERR(mdp5_kms->mmio); 942 goto fail; 943 } 944 945 /* mandatory clocks: */ 946 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true); 947 if (ret) 948 goto fail; 949 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true); 950 if (ret) 951 goto fail; 952 ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true); 953 if (ret) 954 goto fail; 955 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true); 956 if (ret) 957 goto fail; 958 959 /* optional clocks: */ 960 get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); 961 962 /* we need to set a default rate before enabling. Set a safe 963 * rate first, then figure out hw revision, and then set a 964 * more optimal rate: 965 */ 966 clk_set_rate(mdp5_kms->core_clk, 200000000); 967 968 pm_runtime_enable(&pdev->dev); 969 mdp5_kms->rpm_enabled = true; 970 971 read_mdp_hw_revision(mdp5_kms, &major, &minor); 972 973 mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor); 974 if (IS_ERR(mdp5_kms->cfg)) { 975 ret = PTR_ERR(mdp5_kms->cfg); 976 mdp5_kms->cfg = NULL; 977 goto fail; 978 } 979 980 config = mdp5_cfg_get_config(mdp5_kms->cfg); 981 mdp5_kms->caps = config->hw->mdp.caps; 982 983 /* TODO: compute core clock rate at runtime */ 984 clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk); 985 986 /* 987 * Some chipsets have a Shared Memory Pool (SMP), while others 988 * have dedicated latency buffering per source pipe instead; 989 * this section initializes the SMP: 990 */ 991 if (mdp5_kms->caps & MDP_CAP_SMP) { 992 mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp); 993 if (IS_ERR(mdp5_kms->smp)) { 994 ret = PTR_ERR(mdp5_kms->smp); 995 mdp5_kms->smp = NULL; 996 goto fail; 997 } 998 } 999 1000 mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg); 1001 if (IS_ERR(mdp5_kms->ctlm)) { 1002 ret = PTR_ERR(mdp5_kms->ctlm); 1003 mdp5_kms->ctlm = NULL; 1004 goto fail; 1005 } 1006 1007 ret = hwpipe_init(mdp5_kms); 1008 if (ret) 1009 goto fail; 1010 1011 ret = hwmixer_init(mdp5_kms); 1012 if (ret) 1013 goto fail; 1014 1015 ret = interface_init(mdp5_kms); 1016 if (ret) 1017 goto fail; 1018 1019 /* set uninit-ed kms */ 1020 priv->kms = &mdp5_kms->base.base; 1021 1022 return 0; 1023 fail: 1024 mdp5_destroy(pdev); 1025 return ret; 1026 } 1027 1028 static int mdp5_bind(struct device *dev, struct device *master, void *data) 1029 { 1030 struct drm_device *ddev = dev_get_drvdata(master); 1031 struct platform_device *pdev = to_platform_device(dev); 1032 1033 DBG(""); 1034 1035 return mdp5_init(pdev, ddev); 1036 } 1037 1038 static void mdp5_unbind(struct device *dev, struct device *master, 1039 void *data) 1040 { 1041 struct platform_device *pdev = to_platform_device(dev); 1042 1043 mdp5_destroy(pdev); 1044 } 1045 1046 static const struct component_ops mdp5_ops = { 1047 .bind = mdp5_bind, 1048 .unbind = mdp5_unbind, 1049 }; 1050 1051 static int mdp5_dev_probe(struct platform_device *pdev) 1052 { 1053 DBG(""); 1054 return component_add(&pdev->dev, &mdp5_ops); 1055 } 1056 1057 static int mdp5_dev_remove(struct platform_device *pdev) 1058 { 1059 DBG(""); 1060 component_del(&pdev->dev, &mdp5_ops); 1061 return 0; 1062 } 1063 1064 static __maybe_unused int mdp5_runtime_suspend(struct device *dev) 1065 { 1066 struct platform_device *pdev = to_platform_device(dev); 1067 struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); 1068 1069 DBG(""); 1070 1071 return mdp5_disable(mdp5_kms); 1072 } 1073 1074 static __maybe_unused int mdp5_runtime_resume(struct device *dev) 1075 { 1076 struct platform_device *pdev = to_platform_device(dev); 1077 struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); 1078 1079 DBG(""); 1080 1081 return mdp5_enable(mdp5_kms); 1082 } 1083 1084 static const struct dev_pm_ops mdp5_pm_ops = { 1085 SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL) 1086 }; 1087 1088 static const struct of_device_id mdp5_dt_match[] = { 1089 { .compatible = "qcom,mdp5", }, 1090 /* to support downstream DT files */ 1091 { .compatible = "qcom,mdss_mdp", }, 1092 {} 1093 }; 1094 MODULE_DEVICE_TABLE(of, mdp5_dt_match); 1095 1096 static struct platform_driver mdp5_driver = { 1097 .probe = mdp5_dev_probe, 1098 .remove = mdp5_dev_remove, 1099 .driver = { 1100 .name = "msm_mdp", 1101 .of_match_table = mdp5_dt_match, 1102 .pm = &mdp5_pm_ops, 1103 }, 1104 }; 1105 1106 void __init msm_mdp_register(void) 1107 { 1108 DBG(""); 1109 platform_driver_register(&mdp5_driver); 1110 } 1111 1112 void __exit msm_mdp_unregister(void) 1113 { 1114 DBG(""); 1115 platform_driver_unregister(&mdp5_driver); 1116 } 1117