// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};

static int tegra_atomic_check(struct drm_device *drm,
			      struct drm_atomic_state *state)
{
	int err;

	err = drm_atomic_helper_check(drm, state);
	if (err < 0)
		return err;

	return tegra_display_hub_atomic_check(drm, state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
	.atomic_check = tegra_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

	if (tegra->hub) {
		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
	.atomic_commit_tail = tegra_atomic_commit_tail,
};

static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		err = iova_cache_get();
		if (err < 0)
			goto domain;
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.normalize_zpos = true;

	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

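	/*
	 * Reserve the top CARVEOUT_SZ bytes of the IOMMU aperture for
	 * driver-internal allocations made through tegra_drm_alloc() and
	 * hand the remainder to the GEM allocator.
	 */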
	if (tegra->domain) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		u64 dma_mask = dma_get_mask(&device->dev);
		dma_addr_t start, end;
		unsigned long order;

		start = tegra->domain->geometry.aperture_start & dma_mask;
		end = tegra->domain->geometry.aperture_end & dma_mask;

		gem_start = start;
		gem_end = end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	if (tegra->hub) {
		err = tegra_display_hub_prepare(tegra->hub);
		if (err < 0)
			goto device;
	}

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto hub;

	drm_mode_config_reset(drm);

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto hub;

	return 0;

hub:
	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);
device:
	/*
	 * The GEM region and the carveout IOVA domain are only set up
	 * once host1x_device_init() has succeeded, so they must only be
	 * torn down on the error paths that start here or above. The
	 * IOVA cache, however, is acquired together with the IOMMU
	 * domain and is released further below.
	 */
	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
	}

	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain)
		iova_cache_put();
domain:
	if (tegra->domain)
		iommu_domain_free(tegra->domain);
free:
	kfree(tegra);
	return err;
}

static void tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	err = host1x_device_exit(device);
	if (err < 0)
		return;

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
		iommu_domain_free(tegra->domain);
	}

	kfree(tegra);
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init(&fpriv->contexts);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}

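/*
 * Copy one relocation descriptor from userspace and resolve its command
 * buffer and target handles to host1x BOs. Each successful lookup takes a
 * reference on the underlying GEM object; tegra_drm_submit() tracks and
 * releases these references via its refs array.
 */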
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	struct host1x_client *client = &context->client->base;
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* only a single syncpoint increment struct per submit is supported */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* waitchks are not yet supported */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->client = client;
	job->class = client->class;
	job->serialize = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	num_refs = num_cmdbufs + num_relocs * 2;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383; a
		 * higher word count indicates a malformed command buffer.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

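		/*
		 * Compute the end of the gather in 64-bit arithmetic so
		 * that a large offset or word count cannot wrap around and
		 * defeat the bounds check below.
		 */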
		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The gather buffer base address must be 4-byte aligned;
		 * an unaligned offset is malformed and would corrupt the
		 * command stream when the buffer address is relocated.
		 */
		if (offset & 3 || offset > obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
						  &user_relocs[num_relocs],
						  drm, file);
		if (err < 0)
			goto fail;

		reloc = &job->relocs[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned cmdbuf offset would cause an unaligned write
		 * during relocation patching, corrupting the command
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;

	/* a job may run for at most 10 seconds; userspace can only shorten this */
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;

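	/*
	 * Note that this label is reached on success as well: the
	 * references taken during setup are always released here, since
	 * by this point host1x_job_pin() holds its own references to the
	 * buffers attached to the job.
	 */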
fail:
	while (num_refs--)
		drm_gem_object_put_unlocked(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}

#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh,
				  msecs_to_jiffies(args->timeout),
				  &args->value);
}

static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

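	/*
	 * Allocate context IDs starting at 1 (an upper bound of 0 means
	 * "no limit"), so that a valid context never has ID 0.
	 */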
	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

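	/*
	 * For block-linear tiling the value is the log2 of the block
	 * height in GOBs (groups of bytes), so anything above 5 (i.e. a
	 * block height of 32 GOBs) is rejected.
	 */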
	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put_unlocked(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}
#endif

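/*
 * All of these ioctls are marked DRM_RENDER_ALLOW and are therefore
 * available on render nodes as well as on the primary node. A typical
 * (sketched) userspace sequence is:
 *
 *   DRM_IOCTL_TEGRA_OPEN_CHANNEL   - obtain a context for a client class
 *   DRM_IOCTL_TEGRA_GEM_CREATE     - allocate command and data buffers
 *   DRM_IOCTL_TEGRA_SUBMIT         - queue work, returns a fence value
 *   DRM_IOCTL_TEGRA_SYNCPT_WAIT    - wait for the fence to be reached
 *   DRM_IOCTL_TEGRA_CLOSE_CHANNEL  - release the context
 */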
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	if (tegra->domain) {
		mutex_lock(&tegra->mm_lock);
		drm_mm_print(&tegra->mm, &p);
		mutex_unlock(&tegra->mm_lock);
	}

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
			   DRIVER_ATOMIC | DRIVER_RENDER,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	client->drm = tegra;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	client->drm = NULL;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

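/*
 * Attach the client's IOMMU group to the shared DRM IOMMU domain. For
 * clients that request a shared mapping, the first attached group is
 * remembered in tegra->group so that later clients belonging to the same
 * group can skip the attach.
 */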
struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
					       bool shared)
{
	struct drm_device *drm = dev_get_drvdata(client->parent);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_group *group = NULL;
	int err;

	if (tegra->domain) {
		group = iommu_group_get(client->dev);
		if (!group) {
			dev_err(client->dev, "failed to get IOMMU group\n");
			return ERR_PTR(-ENODEV);
		}

		if (!shared || group != tegra->group) {
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
			if (client->dev->archdata.mapping) {
				struct dma_iommu_mapping *mapping =
					to_dma_iommu_mapping(client->dev);
				arm_iommu_detach_device(client->dev);
				arm_iommu_release_mapping(mapping);
			}
#endif
			err = iommu_attach_group(tegra->domain, group);
			if (err < 0) {
				iommu_group_put(group);
				return ERR_PTR(err);
			}

			if (shared && !tegra->group)
				tegra->group = group;
		}
	}

	return group;
}

void host1x_client_iommu_detach(struct host1x_client *client,
				struct iommu_group *group)
{
	struct drm_device *drm = dev_get_drvdata(client->parent);
	struct tegra_drm *tegra = drm->dev_private;

	if (group) {
		if (group == tegra->group) {
			iommu_detach_group(tegra->domain, group);
			tegra->group = NULL;
		}

		iommu_group_put(group);
	}
}

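/*
 * Allocate kernel memory that is visible to the device, mapping it into
 * the carveout IOVA region when a shared IOMMU domain is in use.
 */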
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

static int host1x_drm_probe(struct host1x_device *dev)
{
	struct drm_driver *driver = &tegra_drm_driver;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	dev_set_drvdata(&dev->dev, drm);

	err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
							    false);
	if (err < 0)
		goto put;

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto put;

	return 0;

put:
	drm_dev_put(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_put(drm);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ .compatible = "nvidia,tegra186-display", },
	{ .compatible = "nvidia,tegra186-dc", },
	{ .compatible = "nvidia,tegra186-sor", },
	{ .compatible = "nvidia,tegra186-sor1", },
	{ .compatible = "nvidia,tegra186-vic", },
	{ .compatible = "nvidia,tegra194-display", },
	{ .compatible = "nvidia,tegra194-dc", },
	{ .compatible = "nvidia,tegra194-sor", },
	{ .compatible = "nvidia,tegra194-vic", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
	&tegra_display_hub_driver,
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");