// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>

#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
 * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	10
#define MSM_VERSION_PATCHLEVEL	0

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
#endif

static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	if (kms->funcs->irq_postinstall)
		return kms->funcs->irq_postinstall(kms);

	return 0;
}
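
/*
 * KMS IRQ handling: request the interrupt on behalf of the KMS backend and
 * run its preinstall/postinstall hooks around it.  msm_irq_uninstall() is
 * the matching teardown used from msm_drm_uninit().
 */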
static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	msm_irq_preinstall(dev);

	ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
	if (ret)
		return ret;

	kms->irq_requested = true;

	ret = msm_irq_postinstall(dev);
	if (ret) {
		free_irq(irq, dev);
		return ret;
	}

	return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!priv->kms)
		return;

	kms->funcs->irq_uninstall(kms);
	if (kms->irq_requested)
		free_irq(kms->irq, dev);
}

struct msm_vblank_work {
	struct work_struct work;
	int crtc_id;
	bool enable;
	struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
	else
		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);

	kfree(vbl_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_work *vbl_work;

	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

	vbl_work->crtc_id = crtc_id;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	queue_work(priv->wq, &vbl_work->work);

	return 0;
}
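
/*
 * Tear down everything msm_drm_init() set up, in roughly reverse order:
 * unregister the DRM device and shut down atomic state, flush pending
 * vblank work, destroy the per-CRTC event workers, then release the KMS,
 * IRQ, VRAM and component resources.
 */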
static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	int i;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before msm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */

	flush_workqueue(priv->wq);

	/* clean up event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->event_thread[i].worker)
			kthread_destroy_worker(priv->event_thread[i].worker);
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif

	msm_disp_snapshot_destroy(ddev);

	drm_mode_config_cleanup(ddev);

	for (i = 0; i < priv->num_bridges; i++)
		drm_bridge_remove(priv->bridges[i]);
	priv->num_bridges = 0;

	pm_runtime_get_sync(dev);
	msm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	ddev->dev_private = NULL;
	destroy_workqueue(priv->wq);

	return 0;
}

#include <linux/of_address.h>

struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
	struct msm_gem_address_space *aspace;
	struct msm_mmu *mmu;
	struct device *mdp_dev = dev->dev;
	struct device *mdss_dev = mdp_dev->parent;
	struct device *iommu_dev;

	/*
	 * The IOMMU can be a part of the MDSS device-tree binding, or of
	 * the MDP/DPU device itself.
	 */
	if (device_iommu_mapped(mdp_dev))
		iommu_dev = mdp_dev;
	else
		iommu_dev = mdss_dev;

	mmu = msm_iommu_new(iommu_dev, 0);
	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	if (!mmu) {
		drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
		return NULL;
	}

	aspace = msm_gem_address_space_create(mmu, "mdp_kms",
		0x1000, 0x100000000 - 0x1000);
	if (IS_ERR(aspace)) {
		dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
		mmu->funcs->destroy(mmu);
	}

	return aspace;
}

bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/*
	 * a2xx comes with its own MMU.
	 * On other platforms the IOMMU can be specified either for the
	 * MDP/DPU device or for its parent, the MDSS device.
	 */
	return priv->is_a2xx ||
		device_iommu_mapped(dev->dev) ||
		device_iommu_mapped(dev->dev->parent);
}
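
/*
 * Illustrative (not authoritative) device-tree sketch of the "memory-region"
 * carveout case handled below; the node names and the 16M size are made up
 * for the example:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		splash_region: splash@9d000000 {
 *			reg = <0x9d000000 0x01000000>;
 *		};
 *	};
 *
 *	&mdp {
 *		memory-region = <&splash_region>;
 *	};
 */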
static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */

	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start + 1;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire DMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				    &priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
			     (uint32_t)priv->vram.paddr,
			     (uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
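
/*
 * Main driver init, called from msm_drm_bind() once the component master is
 * brought up: allocate the drm_device and ordered workqueue, set up the GEM
 * LRUs and VRAM carveout, bind the sub-components, bring up KMS (when
 * present) and its IRQ, spin up per-CRTC event workers, and finally register
 * the device and the fbdev/debugfs helpers.
 */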
static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev;
	struct msm_kms *kms;
	int ret, i;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}
	ddev->dev_private = priv;
	priv->dev = ddev;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	if (!priv->wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	/*
	 * Initialize the LRUs:
	 */
	mutex_init(&priv->lru.lock);
	drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.pinned, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->lru.lock);
	fs_reclaim_release(GFP_KERNEL);

	drm_mode_config_init(ddev);

	ret = msm_init_vram(ddev);
	if (ret)
		goto err_drm_dev_put;

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_drm_dev_put;

	dma_set_max_seg_size(dev, UINT_MAX);

	msm_gem_shrinker_init(ddev);

	if (priv->kms_init) {
		ret = priv->kms_init(ddev);
		if (ret) {
			DRM_DEV_ERROR(dev, "failed to load kms\n");
			priv->kms = NULL;
			goto err_msm_uninit;
		}
		kms = priv->kms;
	} else {
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		kms = NULL;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	if (kms) {
		kms->dev = ddev;
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
			goto err_msm_uninit;
		}
	}

	drm_helper_move_panel_connectors_to_head(ddev);

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	for (i = 0; i < priv->num_crtcs; i++) {
		/* initialize event thread */
		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
		priv->event_thread[i].dev = ddev;
		priv->event_thread[i].worker = kthread_create_worker(0,
			"crtc_event:%d", priv->event_thread[i].crtc_id);
		if (IS_ERR(priv->event_thread[i].worker)) {
			ret = PTR_ERR(priv->event_thread[i].worker);
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			priv->event_thread[i].worker = NULL;
			goto err_msm_uninit;
		}

		sched_set_fifo(priv->event_thread[i].worker->task);
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = msm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
			goto err_msm_uninit;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	if (kms) {
		ret = msm_disp_snapshot_init(ddev);
		if (ret)
			DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
	}
	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (kms && fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	drm_kms_helper_poll_init(ddev);

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev);
err_drm_dev_put:
	drm_dev_put(ddev);
	return ret;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}
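
/*
 * Per-open file state: each DRM file gets its own submitqueues, an
 * identifier (seqno) and, when the GPU supports it, a private GPU address
 * space.
 */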
static int context_init(struct drm_device *dev, struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->submitqueues);
	rwlock_init(&ctx->queuelock);

	kref_init(&ctx->ref);
	msm_submitqueue_init(dev, ctx);

	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
	file->driver_priv = ctx;

	ctx->seqno = atomic_inc_return(&ident);

	return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	msm_file_private_put(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	/*
	 * It is not possible to set sysprof param to non-zero if gpu
	 * is not initialized:
	 */
	if (priv->gpu)
		msm_file_private_set_sysprof(ctx, priv->gpu, 0);

	context_close(ctx);
}

int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return -ENXIO;
	drm_dbg_vbl(dev, "crtc=%u", pipe);
	return vblank_ctrl_queue_work(priv, pipe, true);
}

void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return;
	drm_dbg_vbl(dev, "crtc=%u", pipe);
	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */
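
/*
 * A minimal sketch (not part of this driver) of how userspace reaches these
 * handlers through libdrm; error handling omitted:
 *
 *	struct drm_msm_param req = {
 *		.pipe  = MSM_PIPE_3D0,
 *		.param = MSM_PARAM_GPU_ID,
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req));
 *	// on success, req.value holds the GPU id
 */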
static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, file->driver_priv,
				     args->param, &args->value, &args->len);
}

static int msm_ioctl_set_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->set_param(gpu, file->driver_priv,
				     args->param, args->value, args->len);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	uint32_t flags = args->flags;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Uncached CPU mappings are deprecated, as of:
	 *
	 * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
	 *
	 * So promote them to WC.
	 */
	if (flags & MSM_BO_UNCACHED) {
		flags &= ~MSM_BO_CACHED;
		flags |= MSM_BO_WC;
	}

	if (should_fail(&fail_gem_alloc, args->size))
		return -ENOMEM;

	/* pass the (possibly promoted) flags, not the raw args->flags: */
	return msm_gem_new_handle(dev, file, args->size,
			flags, &args->handle, NULL);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, ctx->aspace, iova);
}

static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/* Only supported if per-process address space is supported: */
	if (priv->gpu->aspace == ctx->aspace)
		return -EOPNOTSUPP;

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	return msm_gem_set_iova(obj, ctx->aspace, iova);
}
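
/*
 * GEM_INFO is a multiplexer: MSM_INFO_GET_OFFSET/GET_IOVA/SET_IOVA/GET_FLAGS
 * return or take an immediate value, while MSM_INFO_SET_NAME/GET_NAME copy a
 * debug name string to/from the userspace pointer in args->value.
 */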
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
	case MSM_INFO_SET_IOVA:
	case MSM_INFO_GET_FLAGS:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
		break;
	case MSM_INFO_SET_IOVA:
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
		break;
	case MSM_INFO_GET_FLAGS:
		if (obj->import_attach) {
			ret = -EINVAL;
			break;
		}
		/* Hide internal kernel-only flags: */
		args->value = to_msm_bo(obj)->flags & MSM_BO_FLAGS;
		ret = 0;
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	}

	drm_gem_object_put(obj);

	return ret;
}
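
/*
 * Wait on a submitqueue-scoped fence id: returns 0 if the fence has already
 * been retired (and removed from the queue's fence_idr), -EINVAL for an id
 * that was never handed out, -ETIMEDOUT on timeout, and passes -ERESTARTSYS
 * through for interrupted waits.
 */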
static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
		      ktime_t timeout)
{
	struct dma_fence *fence;
	int ret;

	if (fence_after(fence_id, queue->last_fence)) {
		DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
				      fence_id, queue->last_fence);
		return -EINVAL;
	}

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	ret = mutex_lock_interruptible(&queue->idr_lock);
	if (ret)
		return ret;
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	mutex_unlock(&queue->idr_lock);

	if (!fence)
		return 0;

	ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		ret = 0;
	}

	dma_fence_put(fence);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	struct msm_gpu_submitqueue *queue;
	int ret;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = wait_fence(queue, args->fence, to_ktime(args->timeout));

	msm_submitqueue_put(queue);

	return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		return -ENOENT;
	}

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
		args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SET_PARAM, msm_ioctl_set_param, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};

static void msm_fop_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct drm_file *file = f->private_data;
	struct drm_device *dev = file->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_printer p = drm_seq_file_printer(m);

	if (!priv->gpu)
		return;

	msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, &p);
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = msm_fop_show_fdinfo,
};

static const struct drm_driver msm_driver = {
	.driver_features = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET |
				DRIVER_SYNCOBJ,
	.open = msm_open,
	.postclose = msm_postclose,
	.lastclose = drm_fb_helper_lastclose,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
#endif
	.ioctls = msm_ioctls,
	.num_ioctls = ARRAY_SIZE(msm_ioctls),
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.date = "20130625",
	.major = MSM_VERSION_MAJOR,
	.minor = MSM_VERSION_MINOR,
	.patchlevel = MSM_VERSION_PATCHLEVEL,
};
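
/*
 * System PM prepare/complete: suspend and resume the KMS mode config around
 * system sleep; headless (GPU-only) instances have no priv->kms and are a
 * no-op here.
 */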
int msm_pm_prepare(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return 0;

	return drm_mode_config_helper_suspend(ddev);
}

void msm_pm_complete(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_mode_config_helper_resume(ddev);
}

static const struct dev_pm_ops msm_pm_ops = {
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};

/*
 * Componentized driver support:
 */

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *master_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = master_dev->of_node;
	struct device_node *ep_node;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   component_compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}

/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, component_compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};
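
/*
 * Common probe helper shared by the KMS drivers and the headless GPU
 * platform device below: collect component matches for the display
 * interfaces and the GPU, set the DMA mask, and register the component
 * master that eventually reaches msm_drm_init() via msm_drm_bind().
 */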
int msm_drv_probe(struct device *master_dev,
	int (*kms_init)(struct drm_device *dev))
{
	struct msm_drm_private *priv;
	struct component_match *match = NULL;
	int ret;

	priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->kms_init = kms_init;
	dev_set_drvdata(master_dev, priv);

	/* Add mdp components if we have KMS. */
	if (kms_init) {
		ret = add_components_mdp(master_dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(master_dev, &match);
	if (ret)
		return ret;

	/* on all devices that I am aware of, IOMMUs that can map
	 * any address the CPU can see are used:
	 */
	ret = dma_set_mask_and_coherent(master_dev, ~0);
	if (ret)
		return ret;

	ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
	if (ret)
		return ret;

	return 0;
}

/*
 * Platform driver:
 * Used only for headless GPU instances
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, NULL);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

void msm_drv_shutdown(struct platform_device *pdev)
{
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *drm = priv ? priv->dev : NULL;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (drm && drm->registered && priv->kms)
		drm_atomic_helper_shutdown(drm);
}

static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove = msm_pdev_remove,
	.shutdown = msm_drv_shutdown,
	.driver = {
		.name = "msm",
		.pm = &msm_pm_ops,
	},
};

static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_hdmi_register();
	msm_dp_register();
	adreno_register();
	msm_mdp4_register();
	msm_mdss_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_mdss_unregister();
	msm_mdp4_unregister();
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");