// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};

static int tegra_atomic_check(struct drm_device *drm,
			      struct drm_atomic_state *state)
{
	int err;

	err = drm_atomic_helper_check(drm, state);
	if (err < 0)
		return err;

	return tegra_display_hub_atomic_check(drm, state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
	.atomic_check = tegra_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

	if (tegra->hub) {
		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
	.atomic_commit_tail = tegra_atomic_commit_tail,
};

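/*
 * ->load() callback for the logical DRM device that spans all host1x
 * clients: sets up the (optional) shared IOMMU domain, mode setting,
 * fbdev emulation, the host1x subdevices and vblank support.
 */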
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		err = iova_cache_get();
		if (err < 0)
			goto domain;
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.normalize_zpos = true;

	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	if (tegra->domain) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		u64 dma_mask = dma_get_mask(&device->dev);
		dma_addr_t start, end;
		unsigned long order;

		start = tegra->domain->geometry.aperture_start & dma_mask;
		end = tegra->domain->geometry.aperture_end & dma_mask;

		/*
		 * The top CARVEOUT_SZ bytes of the aperture are reserved
		 * for the carveout allocator used by tegra_drm_alloc().
		 * With a 32-bit aperture, for example, GEM gets
		 * [0, 4 GiB - 64 MiB) and the carveout the top 64 MiB.
		 */
		gem_start = start;
		gem_end = end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	if (tegra->hub) {
		err = tegra_display_hub_prepare(tegra->hub);
		if (err < 0)
			goto device;
	}

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto hub;

	drm_mode_config_reset(drm);

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto hub;

	return 0;

hub:
	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);
device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
	}
domain:
	if (tegra->domain)
		iommu_domain_free(tegra->domain);
free:
	kfree(tegra);
	return err;
}

static void tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	err = host1x_device_exit(device);
	if (err < 0)
		return;

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
		iommu_domain_free(tegra->domain);
	}

	kfree(tegra);
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init(&fpriv->contexts);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

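/*
 * Resolve a userspace GEM handle to the host1x buffer object backing it.
 * This takes a reference on the underlying GEM object; callers are
 * responsible for dropping it (tegra_drm_submit() tracks these references
 * in its refs array).
 */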
static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}

static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

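/*
 * Copy a job description (command buffers, relocations and exactly one
 * syncpoint increment) from userspace, validate it and submit it to the
 * context's channel. On success, the syncpoint fence value to wait for
 * is returned in args->fence.
 *
 * A minimal userspace sketch (fields as consumed below, error handling
 * omitted):
 *
 *	struct drm_tegra_submit args = { 0 };
 *
 *	args.context = context;     // from DRM_IOCTL_TEGRA_OPEN_CHANNEL
 *	args.num_cmdbufs = 1;
 *	args.cmdbufs = (uintptr_t)&cmdbuf;
 *	args.num_syncpts = 1;
 *	args.syncpts = (uintptr_t)&incr;
 *
 *	ioctl(fd, DRM_IOCTL_TEGRA_SUBMIT, &args);
 *	// then wait for args.fence via DRM_IOCTL_TEGRA_SYNCPT_WAIT
 */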
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	struct host1x_client *client = &context->client->base;
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* We don't yet support more than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->client = client;
	job->class = client->class;
	job->serialize = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	num_refs = num_cmdbufs + num_relocs * 2;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383, a
		 * higher value means the words count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The gather buffer base address must be 4-byte aligned;
		 * an unaligned offset is malformed and causes command
		 * stream corruption on the buffer address relocation.
		 */
		if (offset & 3 || offset > obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
						  &user_relocs[num_relocs],
						  drm, file);
		if (err < 0)
			goto fail;

		reloc = &job->relocs[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned cmdbuf offset will cause an unaligned write
		 * during relocation patching, corrupting the command
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;

fail:
	while (num_refs--)
		drm_gem_object_put_unlocked(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}

#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_put_unlocked(gem);

	return 0;
}

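/*
 * The following staging IOCTLs expose raw host1x syncpoint operations
 * (read, increment, wait) to userspace, operating on the syncpoint ID
 * space of the parent host1x controller.
 */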
static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh,
				  msecs_to_jiffies(args->timeout),
				  &args->value);
}

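/*
 * Channel contexts are per-file: each context opened on a client is
 * tracked in the file's IDR, and the allocated ID is what userspace
 * passes back as the context member of subsequent IOCTL arguments.
 */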
static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

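/*
 * The tiling modes map to the hardware buffer layouts: pitch-linear,
 * the legacy 16Bx16 tiled layout and block-linear, where the value is
 * understood to be the log2 of the block height in GOBs (hence the 0-5
 * range enforced below).
 */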
static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put_unlocked(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

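/*
 * debugfs support: the files below appear under
 * /sys/kernel/debug/dri/<minor>/, e.g.:
 *
 *	# cat /sys/kernel/debug/dri/0/framebuffers
 *	# cat /sys/kernel/debug/dri/0/iova
 */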
#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	if (tegra->domain) {
		mutex_lock(&tegra->mm_lock);
		drm_mm_print(&tegra->mm, &p);
		mutex_unlock(&tegra->mm_lock);
	}

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM |
			   DRIVER_ATOMIC | DRIVER_RENDER,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	client->drm = tegra;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	client->drm = NULL;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

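/*
 * Attach a host1x client to the shared IOMMU domain. A device can only
 * be attached to one domain at a time, so any mapping set up by the ARM
 * DMA/IOMMU glue must be torn down before the group can be moved into
 * the DRM driver's domain.
 */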
struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
					       bool shared)
{
	struct drm_device *drm = dev_get_drvdata(client->parent);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_group *group = NULL;
	int err;

	if (tegra->domain) {
		group = iommu_group_get(client->dev);
		if (!group) {
			dev_err(client->dev, "failed to get IOMMU group\n");
			return ERR_PTR(-ENODEV);
		}

		if (!shared || (shared && (group != tegra->group))) {
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
			if (client->dev->archdata.mapping) {
				struct dma_iommu_mapping *mapping =
					to_dma_iommu_mapping(client->dev);
				arm_iommu_detach_device(client->dev);
				arm_iommu_release_mapping(mapping);
			}
#endif
			err = iommu_attach_group(tegra->domain, group);
			if (err < 0) {
				iommu_group_put(group);
				return ERR_PTR(err);
			}

			if (shared && !tegra->group)
				tegra->group = group;
		}
	}

	return group;
}

void host1x_client_iommu_detach(struct host1x_client *client,
				struct iommu_group *group)
{
	struct drm_device *drm = dev_get_drvdata(client->parent);
	struct tegra_drm *tegra = drm->dev_private;

	if (group) {
		if (group == tegra->group) {
			iommu_detach_group(tegra->domain, group);
			tegra->group = NULL;
		}

		iommu_group_put(group);
	}
}

void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

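/*
 * host1x bus glue: the host1x core instantiates this device once the
 * controller and all subdevices matched by host1x_drm_subdevs are
 * available, at which point the DRM device is registered.
 */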
static int host1x_drm_probe(struct host1x_device *dev)
{
	struct drm_driver *driver = &tegra_drm_driver;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	dev_set_drvdata(&dev->dev, drm);

	err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
							    false);
	if (err < 0)
		goto put;

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto put;

	return 0;

put:
	drm_dev_put(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_put(drm);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ .compatible = "nvidia,tegra186-display", },
	{ .compatible = "nvidia,tegra186-dc", },
	{ .compatible = "nvidia,tegra186-sor", },
	{ .compatible = "nvidia,tegra186-sor1", },
	{ .compatible = "nvidia,tegra186-vic", },
	{ .compatible = "nvidia,tegra194-display", },
	{ .compatible = "nvidia,tegra194-dc", },
	{ .compatible = "nvidia,tegra194-sor", },
	{ .compatible = "nvidia,tegra194-vic", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
	&tegra_display_hub_driver,
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");