// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "dc.h"
#include "drm.h"
#include "gem.h"
#include "uapi.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

static int tegra_atomic_check(struct drm_device *drm,
			      struct drm_atomic_state *state)
{
	int err;

	err = drm_atomic_helper_check(drm, state);
	if (err < 0)
		return err;

	return tegra_display_hub_atomic_check(drm, state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
	.atomic_check = tegra_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void tegra_atomic_post_commit(struct drm_device *drm,
				     struct drm_atomic_state *old_state)
{
	struct drm_crtc_state *old_crtc_state __maybe_unused;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
		tegra_crtc_atomic_post_commit(crtc, old_state);
}

static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

	if (tegra->hub) {
		bool fence_cookie = dma_fence_begin_signalling();

		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		dma_fence_end_signalling(fence_cookie);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}

	tegra_atomic_post_commit(drm, old_state);
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
	.atomic_commit_tail = tegra_atomic_commit_tail,
};

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init_base(&fpriv->legacy_contexts, 1);
	xa_init_flags(&fpriv->contexts, XA_FLAGS_ALLOC1);
	xa_init(&fpriv->syncpoints);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}
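
/*
 * Undo tegra_client_open(): close the channel, drop the runtime PM
 * reference taken when the context was opened and free the context.
 */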
static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	pm_runtime_put(context->client->base.dev);
	kfree(context);
}

static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;

	dest->cmdbuf.bo = tegra_gem_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = tegra_gem_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	struct host1x_client *client = &context->client->base;
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp = NULL;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* We currently support only a single syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, false);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->client = client;
	job->class = client->class;
	job->serialize = true;
	job->syncpt_recovery = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	num_refs = num_cmdbufs + num_relocs * 2;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;
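
	/*
	 * Copy in each command buffer descriptor, validate it and add the
	 * gather to the job, keeping a reference to every GEM object
	 * involved in refs[].
	 */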
	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383, a
		 * higher word count means the command buffer is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = tegra_gem_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The gather buffer base address must be 4-byte aligned;
		 * an unaligned offset is malformed and causes command
		 * stream corruption when the buffer address is relocated.
		 */
		if (offset & 3 || offset > obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
						  &user_relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocs[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned cmdbuf offset causes an unaligned write while
		 * the relocations are patched, corrupting the command stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* Syncpoint ref will be dropped on job release. */
	sp = host1x_syncpt_get_by_id(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt = sp;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;
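
	/*
	 * Fall through on success: the temporary GEM references and the
	 * local job reference are dropped on both the success and error
	 * paths.
	 */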
fail:
	while (num_refs--)
		drm_gem_object_put(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}

#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get_by_id_noref(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh,
				  msecs_to_jiffies(args->timeout),
				  &args->value);
}

static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = pm_runtime_resume_and_get(client->base.dev);
	if (err)
		return err;

	err = client->ops->open_channel(client, context);
	if (err < 0) {
		pm_runtime_put(client->base.dev);
		return err;
	}

	err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		pm_runtime_put(client->base.dev);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}
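
/*
 * Legacy channel UAPI: find the client matching the requested class and
 * open a channel context on it. The TEGRA_CHANNEL_* ioctls provide the
 * newer replacement for these entry points.
 */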
static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->legacy_contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->legacy_contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->legacy_contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->legacy_contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
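
/*
 * Look up the wait base associated with one of the context's client
 * syncpoints. Not every syncpoint has a base assigned, in which case
 * -ENXIO is returned.
 */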
static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->legacy_contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_put(gem);

	return 0;
}
#endif
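
/*
 * All entries are flagged DRM_RENDER_ALLOW, so the full UAPI is exposed
 * on render nodes as well as on the primary node.
 */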
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_OPEN, tegra_drm_ioctl_channel_open,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_CLOSE, tegra_drm_ioctl_channel_close,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_MAP, tegra_drm_ioctl_channel_map,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_UNMAP, tegra_drm_ioctl_channel_unmap,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_SUBMIT, tegra_drm_ioctl_channel_submit,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_ALLOCATE, tegra_drm_ioctl_syncpoint_allocate,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_FREE, tegra_drm_ioctl_syncpoint_free,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_WAIT, tegra_drm_ioctl_syncpoint_wait,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->legacy_contexts, tegra_drm_context_cleanup, NULL);
	tegra_drm_uapi_close_file(fpriv);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->legacy_contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}
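
/*
 * Dump the state of the GEM IOVA allocator; the drm_mm is only populated
 * when the explicit IOMMU domain is in use.
 */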
static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	if (tegra->domain) {
		mutex_lock(&tegra->mm_lock);
		drm_mm_print(&tegra->mm, &p);
		mutex_unlock(&tegra->mm_lock);
	}

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static void tegra_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(tegra_debugfs_list,
				 ARRAY_SIZE(tegra_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif

static const struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM |
			   DRIVER_ATOMIC | DRIVER_RENDER | DRIVER_SYNCOBJ,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	/*
	 * When MLOCKs are implemented, change to allocate a shared channel
	 * only when MLOCKs are disabled.
	 */
	client->shared_channel = host1x_channel_request(&client->base);
	if (!client->shared_channel)
		return -EBUSY;

	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	client->drm = tegra;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	client->drm = NULL;
	mutex_unlock(&tegra->clients_lock);

	if (client->shared_channel)
		host1x_channel_put(client->shared_channel);

	return 0;
}
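
/*
 * Attach a client to the shared IOMMU domain, unless it is already
 * attached to another (DMA API managed) domain. On 32-bit ARM, any
 * mapping set up by the DMA API is torn down first.
 */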
int host1x_client_iommu_attach(struct host1x_client *client)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_group *group = NULL;
	int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (client->dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping =
				to_dma_iommu_mapping(client->dev);
		arm_iommu_detach_device(client->dev);
		arm_iommu_release_mapping(mapping);

		domain = iommu_get_domain_for_dev(client->dev);
	}
#endif

	/*
	 * If the host1x client is already attached to an IOMMU domain that is
	 * not the shared IOMMU domain, don't try to attach it to a different
	 * domain. This allows using the IOMMU-backed DMA API.
	 */
	if (domain && domain != tegra->domain)
		return 0;

	if (tegra->domain) {
		group = iommu_group_get(client->dev);
		if (!group)
			return -ENODEV;

		if (domain != tegra->domain) {
			err = iommu_attach_group(tegra->domain, group);
			if (err < 0) {
				iommu_group_put(group);
				return err;
			}
		}

		tegra->use_explicit_iommu = true;
	}

	client->group = group;

	return 0;
}

void host1x_client_iommu_detach(struct host1x_client *client)
{
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_domain *domain;

	if (client->group) {
		/*
		 * Devices that are part of the same group may no longer be
		 * attached to a domain at this point because their group may
		 * have been detached by an earlier client.
		 */
		domain = iommu_get_domain_for_dev(client->dev);
		if (domain)
			iommu_detach_group(tegra->domain, client->group);

		iommu_group_put(client->group);
		client->group = NULL;
	}
}
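
/*
 * Allocate size bytes of zeroed, physically contiguous memory for use by
 * host1x clients. With an explicit IOMMU domain the pages are mapped into
 * the carveout IOVA range; without one, the physical address is handed
 * out directly.
 */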
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

static bool host1x_drm_wants_iommu(struct host1x_device *dev)
{
	struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
	struct iommu_domain *domain;

	/*
	 * If the Tegra DRM clients are backed by an IOMMU, push buffers are
	 * likely to be allocated beyond the 32-bit boundary if sufficient
	 * system memory is available. This is problematic on earlier Tegra
	 * generations where host1x supports a maximum of 32 address bits in
	 * the GATHER opcode. In this case, unless host1x is behind an IOMMU
	 * as well it won't be able to process buffers allocated beyond the
	 * 32-bit boundary.
	 *
	 * The DMA API will use bounce buffers in this case, so that could
	 * perhaps still be made to work, even if less efficient, but there
	 * is another catch: in order to perform cache maintenance on pages
	 * allocated for discontiguous buffers we need to map and unmap the
	 * SG table representing these buffers. This is fine for something
	 * small like a push buffer, but it exhausts the bounce buffer pool
	 * (typically on the order of a few MiB) for framebuffers (many MiB
	 * for any modern resolution).
	 *
	 * Work around this by making sure that Tegra DRM clients only use
	 * an IOMMU if the parent host1x also uses an IOMMU.
	 *
	 * Note that there's still a small gap here that we don't cover: if
	 * the DMA API is backed by an IOMMU there's no way to control which
	 * device is attached to an IOMMU and which isn't, except via wiring
	 * up the device tree appropriately. This is considered a problem of
	 * integration, so care must be taken for the DT to be consistent.
	 */
	domain = iommu_get_domain_for_dev(dev->dev.parent);

	/*
	 * Tegra20 and Tegra30 don't support addressing memory beyond the
	 * 32-bit boundary, so the regular GATHER opcodes will always be
	 * sufficient and whether or not the host1x is attached to an IOMMU
	 * doesn't matter.
	 */
	if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
		return true;

	return domain != NULL;
}

static int host1x_drm_probe(struct host1x_device *dev)
{
	struct tegra_drm *tegra;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(&tegra_drm_driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra) {
		err = -ENOMEM;
		goto put;
	}

	if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) {
		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		err = iova_cache_get();
		if (err < 0)
			goto domain;
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	dev_set_drvdata(&dev->dev, drm);
	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;
	drm->mode_config.max_width = 0;
	drm->mode_config.max_height = 0;

	drm->mode_config.normalize_zpos = true;

	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(dev);
	if (err < 0)
		goto fbdev;

	/*
	 * Now that all display controllers have been initialized, the
	 * maximum supported resolution is known and the bitmask for
	 * horizontal and vertical bitfields can be computed.
	 */
	tegra->hmask = drm->mode_config.max_width - 1;
	tegra->vmask = drm->mode_config.max_height - 1;
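
	/*
	 * Split the usable IOVA space: the top CARVEOUT_SZ (64 MiB) of the
	 * aperture is reserved for kernel-internal allocations made via
	 * tegra_drm_alloc(), while the remainder is handed to the GEM
	 * allocator.
	 */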
	if (tegra->use_explicit_iommu) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		u64 dma_mask = dma_get_mask(&dev->dev);
		dma_addr_t start, end;
		unsigned long order;

		start = tegra->domain->geometry.aperture_start & dma_mask;
		end = tegra->domain->geometry.aperture_end & dma_mask;

		gem_start = start;
		gem_end = end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG_DRIVER("IOMMU apertures:\n");
		DRM_DEBUG_DRIVER("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG_DRIVER("  Carveout: %#llx-%#llx\n", carveout_start,
				 carveout_end);
	} else if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		tegra->domain = NULL;
		iova_cache_put();
	}

	if (tegra->hub) {
		err = tegra_display_hub_prepare(tegra->hub);
		if (err < 0)
			goto device;
	}

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto hub;

	drm_mode_config_reset(drm);

	err = drm_aperture_remove_framebuffers(false, &tegra_drm_driver);
	if (err < 0)
		goto hub;

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto hub;

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto fb;

	return 0;

fb:
	tegra_drm_fb_exit(drm);
hub:
	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);
device:
	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
	}

	host1x_device_exit(dev);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);
domain:
	if (tegra->domain)
		iommu_domain_free(tegra->domain);
free:
	kfree(tegra);
put:
	drm_dev_put(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_dev_unregister(drm);

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);

	err = host1x_device_exit(dev);
	if (err < 0)
		dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
		iommu_domain_free(tegra->domain);
	}

	kfree(tegra);
	drm_dev_put(drm);

	return 0;
}
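
/*
 * System sleep is handled by suspending and resuming the atomic mode
 * configuration through the generic mode config helpers.
 */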
#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dc", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr2d", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ .compatible = "nvidia,tegra210-nvdec", },
	{ .compatible = "nvidia,tegra186-display", },
	{ .compatible = "nvidia,tegra186-dc", },
	{ .compatible = "nvidia,tegra186-sor", },
	{ .compatible = "nvidia,tegra186-sor1", },
	{ .compatible = "nvidia,tegra186-vic", },
	{ .compatible = "nvidia,tegra186-nvdec", },
	{ .compatible = "nvidia,tegra194-display", },
	{ .compatible = "nvidia,tegra194-dc", },
	{ .compatible = "nvidia,tegra194-sor", },
	{ .compatible = "nvidia,tegra194-vic", },
	{ .compatible = "nvidia,tegra194-nvdec", },
	{ .compatible = "nvidia,tegra234-vic", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
	&tegra_display_hub_driver,
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
	&tegra_nvdec_driver,
};

static int __init host1x_drm_init(void)
{
	int err;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");