// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/of_irq.h>

#include <drm/drm_crtc.h>
#include <drm/drm_file.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"

#include "dpu_kms.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
#include "dpu_vbif.h"
#include "dpu_encoder.h"
#include "dpu_plane.h"
#include "dpu_crtc.h"

#define CREATE_TRACE_POINTS
#include "dpu_trace.h"

static const char * const iommu_ports[] = {
	"mdp_0",
};

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
 */
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"

static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);

static unsigned long dpu_iomap_size(struct platform_device *pdev,
				    const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!res) {
		DRM_ERROR("failed to get memory resource: %s\n", name);
		return 0;
	}

	return resource_size(res);
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_danger_signal_status(struct seq_file *s,
		bool danger_status)
{
	struct dpu_kms *kms = (struct dpu_kms *)s->private;
	struct msm_drm_private *priv;
	struct dpu_danger_safe_status status;
	int i;

	if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
		DPU_ERROR("invalid arg(s)\n");
		return 0;
	}

	priv = kms->dev->dev_private;
	memset(&status, 0, sizeof(struct dpu_danger_safe_status));

	pm_runtime_get_sync(&kms->pdev->dev);
	if (danger_status) {
		seq_puts(s, "\nDanger signal status:\n");
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	} else {
		seq_puts(s, "\nSafe signal status:\n");
		if (kms->hw_mdp->ops.get_safe_status)
			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
					&status);
	}
	pm_runtime_put_sync(&kms->pdev->dev);

	seq_printf(s, "MDP : 0x%x\n", status.mdp);

	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
		seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
				status.sspp[i]);
	seq_puts(s, "\n");

	return 0;
}

#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, __prefix ## _show, inode->i_private); \
} \
static const struct file_operations __prefix ## _fops = { \
	.owner = THIS_MODULE, \
	.open = __prefix ## _open, \
	.release = single_release, \
	.read = seq_read, \
	.llseek = seq_lseek, \
}

static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, true);
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);

static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, false);
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);

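/*
 * Expose the danger/safe signal status read back from the MDP top block as
 * debugfs nodes under <debugfs>/dri/<minor>/debug/danger/.
 */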
static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	struct dentry *entry = debugfs_create_dir("danger", parent);

	debugfs_create_file("danger_status", 0600, entry,
			dpu_kms, &dpu_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0600, entry,
			dpu_kms, &dpu_debugfs_safe_stats_fops);
}

static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
	struct dpu_debugfs_regset32 *regset = s->private;
	struct dpu_kms *dpu_kms = regset->dpu_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	void __iomem *base;
	uint32_t i, addr;

	if (!dpu_kms->mmio)
		return 0;

	dev = dpu_kms->dev;
	if (!dev)
		return 0;

	priv = dev->dev_private;
	if (!priv)
		return 0;

	base = dpu_kms->mmio + regset->offset;

	/* insert padding spaces, if needed */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, "         ");
	}

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;
}

static int dpu_debugfs_open_regset32(struct inode *inode,
		struct file *file)
{
	return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
}

static const struct file_operations dpu_fops_regset32 = {
	.open = dpu_debugfs_open_regset32,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
	if (regset) {
		regset->offset = offset;
		regset->blk_len = length;
		regset->dpu_kms = dpu_kms;
	}
}

void dpu_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent, struct dpu_debugfs_regset32 *regset)
{
	if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
		return;

	/* make sure offset is a multiple of 4 */
	regset->offset = round_down(regset->offset, 4);

	debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32);
}

static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	void *p = dpu_hw_util_get_log_mask_ptr();
	struct dentry *entry;

	if (!p)
		return -EINVAL;

	entry = debugfs_create_dir("debug", minor->debugfs_root);

	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);

	dpu_debugfs_danger_init(dpu_kms, entry);
	dpu_debugfs_vbif_init(dpu_kms, entry);
	dpu_debugfs_core_irq_init(dpu_kms, entry);

	return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif

static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return dpu_crtc_vblank(crtc, true);
}

static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	dpu_crtc_vblank(crtc, false);
}

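/*
 * enable_commit()/disable_commit() bracket an atomic commit in the msm core;
 * the runtime PM reference taken here keeps the DPU powered from the start
 * of frame programming until the commit has completed.
 */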
static void dpu_kms_enable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	pm_runtime_get_sync(&dpu_kms->pdev->dev);
}

static void dpu_kms_disable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		ktime_t vsync_time;

		if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0)
			return vsync_time;
	}

	return ktime_get();
}

static void dpu_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct dpu_kms *dpu_kms;
	struct msm_drm_private *priv;
	struct drm_device *dev;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_encoder *encoder;
	int i;

	if (!kms)
		return;
	dpu_kms = to_dpu_kms(kms);
	dev = dpu_kms->dev;

	if (!dev || !dev->dev_private)
		return;
	priv = dev->dev_private;

	/* Call prepare_commit for all affected encoders */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		drm_for_each_encoder_mask(encoder, crtc->dev,
					  crtc_state->encoder_mask) {
			dpu_encoder_prepare_commit(encoder);
		}
	}
}

static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;

		trace_dpu_kms_commit(DRMID(crtc));
		dpu_crtc_commit_kickoff(crtc);
	}
}

/*
 * Override the encoder enable since we need to setup the inline rotator and do
 * some crtc magic before enabling any bridge that might be present.
 */
void dpu_kms_encoder_enable(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc;

	/* Forward this enable call to the commit hook */
	if (funcs && funcs->commit)
		funcs->commit(encoder);

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	drm_for_each_crtc(crtc, dev) {
		if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
			continue;

		trace_dpu_kms_enc_enable(DRMID(crtc));
	}
}

static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	DPU_ATRACE_BEGIN("kms_complete_commit");

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_crtc_complete_commit(crtc);

	DPU_ATRACE_END("kms_complete_commit");
}

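/*
 * Block until the hardware has latched the most recently flushed frame on
 * every encoder driving the given crtc.
 */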
static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		DPU_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			DPU_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}

static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_kms_wait_for_commit_done(kms, crtc);
}

static int _dpu_kms_initialize_dsi(struct drm_device *dev,
				   struct msm_drm_private *priv,
				   struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	int i, rc = 0;

	if (!(priv->dsi[0] || priv->dsi[1]))
		return rc;

	/* TODO: Support two independent DSI connectors */
	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for dsi display\n");
		return PTR_ERR(encoder);
	}

	priv->encoders[priv->num_encoders++] = encoder;

	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		if (!priv->dsi[i])
			continue;

		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
				i, rc);
			break;
		}
	}

	return rc;
}

/**
 * _dpu_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @dpu_kms: Pointer to dpu kms structure
 * Returns: Zero on success
 */
static int _dpu_kms_setup_displays(struct drm_device *dev,
				   struct msm_drm_private *priv,
				   struct dpu_kms *dpu_kms)
{
	/*
	 * Extend this function to initialize other
	 * types of displays
	 */

	return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
}

static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!dpu_kms) {
		DPU_ERROR("invalid dpu_kms\n");
		return;
	} else if (!dpu_kms->dev) {
		DPU_ERROR("invalid dev\n");
		return;
	} else if (!dpu_kms->dev->dev_private) {
		DPU_ERROR("invalid dev_private\n");
		return;
	}
	priv = dpu_kms->dev->dev_private;

	for (i = 0; i < priv->num_crtcs; i++)
		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
	priv->num_crtcs = 0;

	for (i = 0; i < priv->num_planes; i++)
		priv->planes[i]->funcs->destroy(priv->planes[i]);
	priv->num_planes = 0;

	for (i = 0; i < priv->num_connectors; i++)
		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
	priv->num_connectors = 0;

	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
	priv->num_encoders = 0;
}

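/*
 * _dpu_kms_drm_obj_init - create the DRM mode objects (planes, CRTCs,
 * encoders and connectors) backed by the blocks in the hardware catalog
 */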
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
	struct drm_crtc *crtc;

	struct msm_drm_private *priv;
	struct dpu_mdss_cfg *catalog;

	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
	int max_crtc_count;

	dev = dpu_kms->dev;
	priv = dev->dev_private;
	catalog = dpu_kms->catalog;

	/*
	 * Create encoder and query display drivers to create
	 * bridges and connectors
	 */
	ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
	if (ret)
		goto fail;

	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

	/* Create the planes, keeping track of one primary/cursor per crtc */
	for (i = 0; i < catalog->sspp_count; i++) {
		enum drm_plane_type type;

		if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
			&& cursor_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_CURSOR;
		else if (primary_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_PRIMARY;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
			  type, catalog->sspp[i].features,
			  catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));

		plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
				       (1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			DPU_ERROR("dpu_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor_planes[cursor_planes_idx++] = plane;
		else if (type == DRM_PLANE_TYPE_PRIMARY)
			primary_planes[primary_planes_idx++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_dpu_kms_drm_obj_destroy(dpu_kms);
	return ret;
}

static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}

static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	int i;

	dev = dpu_kms->dev;
	if (!dev)
		return;

	if (dpu_kms->hw_intr)
		dpu_hw_intr_destroy(dpu_kms->hw_intr);
	dpu_kms->hw_intr = NULL;

	/* safe to call these more than once during shutdown */
	_dpu_kms_mmu_destroy(dpu_kms);

	if (dpu_kms->catalog) {
		for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
		}
	}

	if (dpu_kms->rm_init)
		dpu_rm_destroy(&dpu_kms->rm);
	dpu_kms->rm_init = false;

	if (dpu_kms->catalog)
		dpu_hw_catalog_deinit(dpu_kms->catalog);
	dpu_kms->catalog = NULL;

	if (dpu_kms->vbif[VBIF_NRT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
	dpu_kms->vbif[VBIF_NRT] = NULL;

	if (dpu_kms->vbif[VBIF_RT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
	dpu_kms->vbif[VBIF_RT] = NULL;

	if (dpu_kms->hw_mdp)
		dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
	dpu_kms->hw_mdp = NULL;

	if (dpu_kms->mmio)
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
	dpu_kms->mmio = NULL;
}

static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	_dpu_kms_hw_destroy(dpu_kms);
}

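/*
 * Propagate the panel's operating mode (command vs. video) and the DSI
 * interfaces driving it to the encoder before it is brought up.
 */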
static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
				      struct drm_encoder *encoder,
				      bool cmd_mode)
{
	struct msm_display_info info;
	struct msm_drm_private *priv = encoder->dev->dev_private;
	int i, rc = 0;

	memset(&info, 0, sizeof(info));

	info.intf_type = encoder->encoder_type;
	info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
			MSM_DISPLAY_CAP_VID_MODE;

	/* TODO: No support for DSI swap */
	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		if (priv->dsi[i]) {
			info.h_tile_instance[info.num_of_h_tiles] = i;
			info.num_of_h_tiles++;
		}
	}

	rc = dpu_encoder_setup(encoder->dev, encoder, &info);
	if (rc)
		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
			encoder->base.id, rc);
}

static irqreturn_t dpu_irq(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	return dpu_core_irq(dpu_kms);
}

static void dpu_irq_preinstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	dpu_core_irq_preinstall(dpu_kms);
}

static void dpu_irq_uninstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	dpu_core_irq_uninstall(dpu_kms);
}

static const struct msm_kms_funcs kms_funcs = {
	.hw_init = dpu_kms_hw_init,
	.irq_preinstall = dpu_irq_preinstall,
	.irq_uninstall = dpu_irq_uninstall,
	.irq = dpu_irq,
	.enable_commit = dpu_kms_enable_commit,
	.disable_commit = dpu_kms_disable_commit,
	.vsync_time = dpu_kms_vsync_time,
	.prepare_commit = dpu_kms_prepare_commit,
	.flush_commit = dpu_kms_flush_commit,
	.wait_flush = dpu_kms_wait_flush,
	.complete_commit = dpu_kms_complete_commit,
	.enable_vblank = dpu_kms_enable_vblank,
	.disable_vblank = dpu_kms_disable_vblank,
	.check_modified_format = dpu_format_check_modified_format,
	.get_format = dpu_get_msm_format,
	.round_pixclk = dpu_kms_round_pixclk,
	.destroy = dpu_kms_destroy,
	.set_encoder_mode = _dpu_kms_set_encoder_mode,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = dpu_kms_debugfs_init,
#endif
};

static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_mmu *mmu;

	if (!dpu_kms->base.aspace)
		return;

	mmu = dpu_kms->base.aspace->mmu;

	mmu->funcs->detach(mmu, (const char **)iommu_ports,
			ARRAY_SIZE(iommu_ports));
	msm_gem_address_space_put(dpu_kms->base.aspace);

	dpu_kms->base.aspace = NULL;
}

static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
	struct iommu_domain *domain;
	struct msm_gem_address_space *aspace;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return 0;

	domain->geometry.aperture_start = 0x1000;
	domain->geometry.aperture_end = 0xffffffff;

	aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
			domain, "dpu1");
	if (IS_ERR(aspace)) {
		iommu_domain_free(domain);
		return PTR_ERR(aspace);
	}

	ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
			ARRAY_SIZE(iommu_ports));
	if (ret) {
		DPU_ERROR("failed to attach iommu %d\n", ret);
		msm_gem_address_space_put(aspace);
		return ret;
	}

	dpu_kms->base.aspace = aspace;
	return 0;
}

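/*
 * Look up one of the clocks parsed from DT by name; returns NULL if the
 * device has no clock with that name.
 */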
static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
		char *clock_name)
{
	struct dss_module_power *mp = &dpu_kms->mp;
	int i;

	for (i = 0; i < mp->num_clk; i++) {
		if (!strcmp(mp->clk_config[i].clk_name, clock_name))
			return &mp->clk_config[i];
	}

	return NULL;
}

u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
	struct dss_clk *clk;

	clk = _dpu_kms_get_clk(dpu_kms, clock_name);
	if (!clk)
		return -EINVAL;

	return clk_get_rate(clk->clk);
}

static int dpu_kms_hw_init(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	int i, rc = -EINVAL;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return rc;
	}

	dpu_kms = to_dpu_kms(kms);
	dev = dpu_kms->dev;
	if (!dev) {
		DPU_ERROR("invalid device\n");
		return rc;
	}

	priv = dev->dev_private;
	if (!priv) {
		DPU_ERROR("invalid private data\n");
		return rc;
	}

	atomic_set(&dpu_kms->bandwidth_ref, 0);

	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
	if (IS_ERR(dpu_kms->mmio)) {
		rc = PTR_ERR(dpu_kms->mmio);
		DPU_ERROR("mdp register memory map failed: %d\n", rc);
		dpu_kms->mmio = NULL;
		goto error;
	}
	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
	dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");

	dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
		DPU_ERROR("vbif register memory map failed: %d\n", rc);
		dpu_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}
	dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
	dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
		dpu_kms->vbif[VBIF_NRT] = NULL;
		DPU_DEBUG("VBIF NRT is not defined");
	} else {
		dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
							     "vbif_nrt");
	}

	dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
	if (IS_ERR(dpu_kms->reg_dma)) {
		dpu_kms->reg_dma = NULL;
		DPU_DEBUG("REG_DMA is not defined");
	} else {
		dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
	}

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);

	pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);

	dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
	if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
		rc = PTR_ERR(dpu_kms->catalog);
		if (!dpu_kms->catalog)
			rc = -EINVAL;
		DPU_ERROR("catalog init failed: %d\n", rc);
		dpu_kms->catalog = NULL;
		goto power_error;
	}

	/*
	 * Now we need to read the HW catalog and initialize resources such as
	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
	 */
	rc = _dpu_kms_mmu_init(dpu_kms);
	if (rc) {
		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
	if (rc) {
		DPU_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}

	dpu_kms->rm_init = true;

	dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
					     dpu_kms->catalog);
	if (IS_ERR(dpu_kms->hw_mdp)) {
		rc = PTR_ERR(dpu_kms->hw_mdp);
		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
		dpu_kms->hw_mdp = NULL;
		goto power_error;
	}

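	/* Instantiate a hardware block for each VBIF listed in the catalog. */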
	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

		dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
				dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
		if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
			if (!dpu_kms->hw_vbif[vbif_idx])
				rc = -EINVAL;
			DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			dpu_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
			_dpu_kms_get_clk(dpu_kms, "core"));
	if (rc) {
		DPU_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
		rc = PTR_ERR(dpu_kms->hw_intr);
		DPU_ERROR("hw_intr init failed: %d\n", rc);
		dpu_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * max crtc width is equal to the max mixer width * 2 and max height is
	 * 4K
	 */
	dev->mode_config.max_width =
			dpu_kms->catalog->caps->max_mixer_width * 2;
	dev->mode_config.max_height = 4096;

	/*
	 * Support format modifiers for compression etc.
	 */
	dev->mode_config.allow_fb_modifiers = true;

	/*
	 * _dpu_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _dpu_kms_drm_obj_init(dpu_kms);
	if (rc) {
		DPU_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;

drm_obj_init_err:
	dpu_core_perf_destroy(&dpu_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
	_dpu_kms_hw_destroy(dpu_kms);

	return rc;
}

struct msm_kms *dpu_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	int irq;

	if (!dev || !dev->dev_private) {
		DPU_ERROR("drm device node invalid\n");
		return ERR_PTR(-EINVAL);
	}

	priv = dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	/* irq_of_parse_and_map() returns 0, not a negative errno, on failure */
	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
	if (!irq) {
		DPU_ERROR("failed to get irq\n");
		return ERR_PTR(-EINVAL);
	}
	dpu_kms->base.irq = irq;

	return &dpu_kms->base;
}

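/*
 * DPU is brought up as a component of the top-level msm DRM device, so
 * allocation and KMS registration happen at bind time rather than at
 * platform probe.
 */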
static int dpu_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *ddev = dev_get_drvdata(master);
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct dpu_kms *dpu_kms;
	struct dss_module_power *mp;
	int ret = 0;

	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
	if (!dpu_kms)
		return -ENOMEM;

	mp = &dpu_kms->mp;
	ret = msm_dss_parse_clock(pdev, mp);
	if (ret) {
		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, dpu_kms);

	msm_kms_init(&dpu_kms->base, &kms_funcs);
	dpu_kms->dev = ddev;
	dpu_kms->pdev = pdev;

	pm_runtime_enable(&pdev->dev);
	dpu_kms->rpm_enabled = true;

	priv->kms = &dpu_kms->base;
	return ret;
}

static void dpu_unbind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
	struct dss_module_power *mp = &dpu_kms->mp;

	msm_dss_put_clk(mp->clk_config, mp->num_clk);
	devm_kfree(&pdev->dev, mp->clk_config);
	mp->num_clk = 0;

	if (dpu_kms->rpm_enabled)
		pm_runtime_disable(&pdev->dev);
}

static const struct component_ops dpu_ops = {
	.bind = dpu_bind,
	.unbind = dpu_unbind,
};

static int dpu_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &dpu_ops);
}

static int dpu_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &dpu_ops);
	return 0;
}

static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
	int rc = -1;
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
	struct drm_device *ddev;
	struct dss_module_power *mp = &dpu_kms->mp;

	ddev = dpu_kms->dev;
	if (!ddev) {
		DPU_ERROR("invalid drm_device\n");
		return rc;
	}

	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
	if (rc)
		DPU_ERROR("clock disable failed rc:%d\n", rc);

	return rc;
}

static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
	int rc = -1;
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
	struct drm_encoder *encoder;
	struct drm_device *ddev;
	struct dss_module_power *mp = &dpu_kms->mp;

	ddev = dpu_kms->dev;
	if (!ddev) {
		DPU_ERROR("invalid drm_device\n");
		return rc;
	}

	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
	if (rc) {
		DPU_ERROR("clock enable failed rc:%d\n", rc);
		return rc;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	drm_for_each_encoder(encoder, ddev)
		dpu_encoder_virt_runtime_resume(encoder);

	return rc;
}

static const struct dev_pm_ops dpu_pm_ops = {
	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
};

static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,sdm845-dpu", },
	{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);

static struct platform_driver dpu_driver = {
	.probe = dpu_dev_probe,
	.remove = dpu_dev_remove,
	.driver = {
		.name = "msm_dpu",
		.of_match_table = dpu_dt_match,
		.pm = &dpu_pm_ops,
	},
};

void __init msm_dpu_register(void)
{
	platform_driver_register(&dpu_driver);
}

void __exit msm_dpu_unregister(void)
{
	platform_driver_unregister(&dpu_driver);
}