/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kthread.h>
#include <linux/of_address.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "adreno/adreno_gpu.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - adds SUBMITQUEUE_QUERY ioctl
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	5
#define MSM_VERSION_PATCHLEVEL	0

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate = false;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

/*
 * Util/helpers:
 */
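/*
 * msm_clk_bulk_get() - build a clk_bulk_data table from the "clock-names"
 * DT property of @dev. Returns the number of clocks found (with the
 * devm-allocated table stored in *bulk), 0 if the property is absent, or
 * a negative errno on allocation/lookup failure.
 */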
int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
{
	struct property *prop;
	const char *name;
	struct clk_bulk_data *local;
	int i = 0, ret, count;

	count = of_property_count_strings(dev->of_node, "clock-names");
	if (count < 1)
		return 0;

	local = devm_kcalloc(dev, count, sizeof(struct clk_bulk_data),
		GFP_KERNEL);
	if (!local)
		return -ENOMEM;

	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
		local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
		if (!local[i].id) {
			devm_kfree(dev, local);
			return -ENOMEM;
		}

		i++;
	}

	ret = devm_clk_bulk_get(dev, count, local);
	if (ret) {
		for (i = 0; i < count; i++)
			devm_kfree(dev, (void *) local[i].id);
		devm_kfree(dev, local);

		return ret;
	}

	*bulk = local;
	return count;
}

struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
		const char *name)
{
	int i;
	char n[32];

	snprintf(n, sizeof(n), "%s_clk", name);

	for (i = 0; bulk && i < count; i++) {
		if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
			return bulk[i].clk;
	}

	return NULL;
}

struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
	struct clk *clk;
	char name2[32];

	clk = devm_clk_get(&pdev->dev, name);
	if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
		return clk;

	snprintf(name2, sizeof(name2), "%s_clk", name);

	clk = devm_clk_get(&pdev->dev, name2);
	if (!IS_ERR(clk))
		dev_warn(&pdev->dev, "Using legacy clk name binding.  Use "
				"\"%s\" instead of \"%s\"\n", name, name2);

	return clk;
}

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}

void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		pr_err("IO:R %p %08x\n", addr, val);
	return val;
}

struct msm_vblank_work {
	struct work_struct work;
	int crtc_id;
	bool enable;
	struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
	else
		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);

	kfree(vbl_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_work *vbl_work;

	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

	vbl_work->crtc_id = crtc_id;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	queue_work(priv->wq, &vbl_work->work);

	return 0;
}
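/*
 * msm_drm_uninit() is the common teardown path: it runs both on component
 * unbind and from msm_drm_init()'s err_msm_uninit error path, so each step
 * below has to tolerate partially-initialized state.
 */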
static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev = platform_get_drvdata(pdev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_mdss *mdss = priv->mdss;
	int i;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */

	flush_workqueue(priv->wq);

	/* clean up event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->event_thread[i].thread) {
			kthread_destroy_worker(&priv->event_thread[i].worker);
			priv->event_thread[i].thread = NULL;
		}
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif

	drm_mode_config_cleanup(ddev);

	pm_runtime_get_sync(dev);
	drm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	if (mdss && mdss->funcs)
		mdss->funcs->destroy(ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	destroy_workqueue(priv->wq);
	kfree(priv);

	return 0;
}

#define KMS_MDP4 4
#define KMS_MDP5 5
#define KMS_DPU  3

static int get_mdp_ver(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	return (int) (unsigned long) of_device_get_match_data(dev);
}

bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/* a2xx comes with its own MMU */
	return priv->is_a2xx || iommu_present(&platform_bus_type);
}

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go.  There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
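	/*
	 * A carveout for case 2 might look roughly like this in DT (node
	 * names and addresses are hypothetical, for illustration only):
	 *
	 *	reserved-memory {
	 *		splash_region: splash@9d400000 {
	 *			reg = <0x0 0x9d400000 0x0 0x02400000>;
	 *		};
	 *	};
	 *
	 *	&mdss {
	 *		memory-region = <&splash_region>;
	 *	};
	 */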

	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;

		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = resource_size(&r);
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire CMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
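/*
 * msm_drm_init() is the component-master bind path: it runs once all of the
 * display sub-devices matched in msm_pdev_probe() have registered, and any
 * failure after component_bind_all() funnels through msm_drm_uninit().
 */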
static int msm_drm_init(struct device *dev, struct drm_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	struct msm_mdss *mdss;
	int ret, i;
	struct sched_param param;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}

	platform_set_drvdata(pdev, ddev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_put_drm_dev;
	}

	ddev->dev_private = priv;
	priv->dev = ddev;

	switch (get_mdp_ver(pdev)) {
	case KMS_MDP5:
		ret = mdp5_mdss_init(ddev);
		break;
	case KMS_DPU:
		ret = dpu_mdss_init(ddev);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret)
		goto err_free_priv;

	mdss = priv->mdss;

	priv->wq = alloc_ordered_workqueue("msm", 0);

	INIT_WORK(&priv->free_work, msm_gem_free_work);
	init_llist_head(&priv->free_list);

	INIT_LIST_HEAD(&priv->inactive_list);

	drm_mode_config_init(ddev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_destroy_mdss;

	ret = msm_init_vram(ddev);
	if (ret)
		goto err_msm_uninit;

	msm_gem_shrinker_init(ddev);

	switch (get_mdp_ver(pdev)) {
	case KMS_MDP4:
		kms = mdp4_kms_init(ddev);
		priv->kms = kms;
		break;
	case KMS_MDP5:
		kms = mdp5_kms_init(ddev);
		break;
	case KMS_DPU:
		kms = dpu_kms_init(ddev);
		priv->kms = kms;
		break;
	default:
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		kms = NULL;
		break;
	}

	if (IS_ERR(kms)) {
		DRM_DEV_ERROR(dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		priv->kms = NULL;
		goto err_msm_uninit;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	if (kms) {
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
			goto err_msm_uninit;
		}
	}

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	/*
	 * This priority was found during empirical testing to give appropriate
	 * realtime scheduling to process display updates and interact with
	 * other real time and normal priority tasks.
	 */
	param.sched_priority = 16;
	for (i = 0; i < priv->num_crtcs; i++) {
		/* initialize event thread */
		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
		kthread_init_worker(&priv->event_thread[i].worker);
		priv->event_thread[i].dev = ddev;
		priv->event_thread[i].thread =
			kthread_run(kthread_worker_fn,
				    &priv->event_thread[i].worker,
				    "crtc_event:%d", priv->event_thread[i].crtc_id);
		if (IS_ERR(priv->event_thread[i].thread)) {
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			priv->event_thread[i].thread = NULL;
			goto err_msm_uninit;
		}

		ret = sched_setscheduler(priv->event_thread[i].thread,
					 SCHED_FIFO, &param);
		if (ret)
			dev_warn(dev, "event_thread set priority failed:%d\n",
				 ret);
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = drm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
			goto err_msm_uninit;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (kms && fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	drm_kms_helper_poll_init(ddev);

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev);
	return ret;
err_destroy_mdss:
	if (mdss && mdss->funcs)
		mdss->funcs->destroy(ddev);
err_free_priv:
	kfree(priv);
err_put_drm_dev:
	drm_dev_put(ddev);
	return ret;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}
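/*
 * Per-open state: each drm_file gets a msm_file_private that owns the
 * file's submitqueues and caches the GPU address space its submits use.
 */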
static int context_init(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	msm_submitqueue_init(dev, ctx);

	/* priv->gpu may be NULL in the dummy headless case: */
	ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
	file->driver_priv = ctx;

	return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	kfree(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	context_close(ctx);
}

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	if (kms->funcs->irq_postinstall)
		return kms->funcs->irq_postinstall(kms);

	return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	DBG("dev=%p, crtc=%u", dev, pipe);

	return vblank_ctrl_queue_work(priv, pipe, true);
}

static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;

	DBG("dev=%p, crtc=%u", dev, pipe);

	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */
static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle, NULL);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_gem_object *obj, uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->gpu)
		return -EINVAL;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	}

	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);
	struct msm_gpu_submitqueue *queue;
	struct msm_gpu *gpu = priv->gpu;
	int ret;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
		true);

	msm_submitqueue_put(queue);

	return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
		args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.lastclose          = drm_fb_helper_lastclose,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object_unlocked = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;

	if (WARN_ON(priv->pm_state))
		drm_atomic_state_put(priv->pm_state);

	priv->pm_state = drm_atomic_helper_suspend(ddev);
	if (IS_ERR(priv->pm_state)) {
		int ret = PTR_ERR(priv->pm_state);

		DRM_ERROR("Failed to suspend dpu, %d\n", ret);
		return ret;
	}

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	int ret;

	if (WARN_ON(!priv->pm_state))
		return -ENOENT;

	ret = drm_atomic_helper_resume(ddev, priv->pm_state);
	if (!ret)
		priv->pm_state = NULL;

	return ret;
}
#endif

#ifdef CONFIG_PM
static int msm_runtime_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->disable(mdss);

	return 0;
}

static int msm_runtime_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_mdss *mdss = priv->mdss;

	DBG("");

	if (mdss && mdss->funcs)
		return mdss->funcs->enable(mdss);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};

/*
 * Componentized driver support:
 */

/*
 * NOTE: duplication of the same code as exynos or imx (or probably any other
 * driver), so there is probably some room for helpers.
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * on MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * on MDP5 based platforms, the MDSS platform device is the component
	 * master that adds MDP5 and other display interface components to
	 * itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}

static int compare_name_mdp(struct device *dev, void *data)
{
	return (strstr(dev_name(dev), "mdp") != NULL);
}

static int add_display_components(struct device *dev,
				  struct component_match **matchptr)
{
	struct device *mdp_dev;
	int ret;

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
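	/*
	 * For example, a typical (hypothetical) MDP5/DPU hierarchy in DT:
	 *
	 *	mdss@...              <- component master (this device)
	 *	    mdp@... / dpu@...  <- found via compare_name_mdp()
	 *	    dsi@...
	 *	    hdmi-tx@...
	 */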
	if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
	    of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			DRM_DEV_ERROR(dev, "failed to populate children devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		put_device(mdp_dev);

		/* add the MDP component itself */
		drm_of_component_match_add(dev, matchptr, compare_of,
					   mdp_dev->of_node);
	} else {
		/* MDP4 */
		mdp_dev = dev;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}

/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	drm_of_component_match_add(dev, matchptr, compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ret;

	if (get_mdp_ver(pdev)) {
		ret = add_display_components(&pdev->dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(&pdev->dev, &match);
	if (ret)
		goto fail;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		goto fail;

	ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
	if (ret)
		goto fail;

	return 0;

fail:
	of_platform_depopulate(&pdev->dev);
	return ret;
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
	{ .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
};
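/*
 * The output sub-drivers and the GPU driver are registered before the msm
 * platform driver itself, so their devices and drivers already exist by
 * the time the component master tries to bind.
 */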
static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");