/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M

struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};

static void tegra_atomic_schedule(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	tegra->commit.state = state;
	schedule_work(&tegra->commit.work);
}

static void tegra_atomic_complete(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	struct drm_device *drm = tegra->drm;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	drm_atomic_helper_commit_planes(drm, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_put(state);
}

static void tegra_atomic_work(struct work_struct *work)
{
	struct tegra_drm *tegra = container_of(work, struct tegra_drm,
					       commit.work);

	tegra_atomic_complete(tegra, tegra->commit.state);
}

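/*
 * Nonblocking commits are funneled through the single tegra->commit.work
 * item: taking commit.lock and flushing the work below guarantees that at
 * most one asynchronous commit is in flight at any time.
 */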
static int tegra_atomic_commit(struct drm_device *drm,
			       struct drm_atomic_state *state, bool nonblock)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding nonblocking commits */
	mutex_lock(&tegra->commit.lock);
	flush_work(&tegra->commit.work);

	/*
	 * This is the point of no return - everything below never fails
	 * except when the hw goes bonghits. Which means we can commit the
	 * new state on the software side now.
	 */

	drm_atomic_helper_swap_state(state, true);

	drm_atomic_state_get(state);
	if (nonblock)
		tegra_atomic_schedule(tegra, state);
	else
		tegra_atomic_complete(tegra, state);

	mutex_unlock(&tegra->commit.lock);
	return 0;
}

static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = tegra_fb_output_poll_changed,
#endif
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = tegra_atomic_commit,
};

static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order,
				 carveout_end >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	mutex_init(&tegra->commit.lock);
	INIT_WORK(&tegra->commit.work, tegra_atomic_work);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.funcs = &tegra_drm_mode_funcs;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto device;

	drm_mode_config_reset(drm);

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto vblank;

	return 0;

vblank:
	drm_vblank_cleanup(drm);
device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}
free:
	kfree(tegra);
	return err;
}

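/*
 * Tears down what tegra_drm_load() set up, in reverse order. Note that if
 * host1x_device_exit() fails, teardown stops early and the IOMMU domain
 * and driver-private data are left allocated.
 */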
static void tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_mode_config_cleanup(drm);
	drm_vblank_cleanup(drm);

	err = host1x_device_exit(device);
	if (err < 0)
		return;

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}

	kfree(tegra);
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init(&fpriv->contexts);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct tegra_drm *tegra = drm->dev_private;

	tegra_fbdev_restore_mode(tegra->fbdev);
#endif
}

static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	drm_gem_object_unreference_unlocked(gem);

	bo = to_tegra_bo(gem);
	return &bo->base;
}

static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

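/*
 * Builds and submits a host1x job from a userspace submit descriptor: each
 * command buffer becomes a gather, relocations are copied and resolved to
 * BO references, and wait checks are copied verbatim. When the job is
 * pinned, host1x applies each relocation; conceptually (an illustrative
 * sketch, not the literal host1x code path):
 *
 *   *(u32 *)(cmdbuf_vaddr + reloc->cmdbuf.offset) =
 *           (target_dma_addr + reloc->target.offset) >> reloc->shift;
 */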
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	unsigned int num_waitchks = args->num_waitchks;
	struct drm_tegra_cmdbuf __user *cmdbufs =
		(void __user *)(uintptr_t)args->cmdbufs;
	struct drm_tegra_reloc __user *relocs =
		(void __user *)(uintptr_t)args->relocs;
	struct drm_tegra_waitchk __user *waitchks =
		(void __user *)(uintptr_t)args->waitchks;
	struct drm_tegra_syncpt syncpt;
	struct host1x_job *job;
	int err;

	/* We don't yet support more than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, args->num_waitchks);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->client = (u32)args->context;
	job->class = context->client->base.class;
	job->serialize = true;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;

		if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
						  &relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;
	}

	if (copy_from_user(job->waitchk, waitchks,
			   sizeof(*waitchks) * num_waitchks)) {
		err = -EFAULT;
		goto fail;
	}

	if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
			   sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err)
		goto fail_submit;

	args->fence = job->syncpt_end;

	host1x_job_put(job);
	return 0;

fail_submit:
	host1x_job_unpin(job);
fail:
	host1x_job_put(job);
	return err;
}

#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

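/*
 * The three syncpoint IOCTLs below expose raw host1x syncpoints to
 * userspace: READ returns the current (minimum) value, INCR increments the
 * syncpoint from the CPU, and WAIT blocks until the given threshold is
 * reached or the timeout expires.
 */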
static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh, args->timeout,
				  &args->value);
}

static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

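/*
 * Looks up the submit context under fpriv->lock and forwards the descriptor
 * to the client's submit op, which for most clients is tegra_drm_submit()
 * above.
 */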
static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_unreference_unlocked(gem);

	return err;
}

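/*
 * Rejects anything outside DRM_TEGRA_GEM_FLAGS and rewrites bo->flags from
 * scratch on every call; BOTTOM_UP is the only flag translated below.
 */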
static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, 0),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

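/*
 * debugfs files: "framebuffers" lists the framebuffers currently attached
 * to the device, "iova" dumps the state of the GEM address space allocator.
 */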
#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	mutex_lock(&tegra->mm_lock);
	drm_mm_print(&tegra->mm, &p);
	mutex_unlock(&tegra->mm_lock);

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
			   DRIVER_ATOMIC,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.preclose = tegra_drm_preclose,
	.lastclose = tegra_drm_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,
	.dumb_map_offset = tegra_bo_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

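/*
 * Allocates kernel-internal memory (used, for example, to load falcon
 * firmware) and returns both a CPU virtual address and a device-visible
 * address in *dma. With an IOMMU the pages are mapped into the carveout
 * reserved at the top of the aperture; without one, the physical address
 * is handed out directly (forced below 4 GiB via GFP_DMA).
 */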
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
		      dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

static int host1x_drm_probe(struct host1x_device *dev)
{
	struct drm_driver *driver = &tegra_drm_driver;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	dev_set_drvdata(&dev->dev, drm);

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto unref;

	return 0;

unref:
	drm_dev_unref(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_unref(drm);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_kms_helper_poll_disable(drm);
	tegra_drm_fb_suspend(drm);

	tegra->state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(tegra->state)) {
		tegra_drm_fb_resume(drm);
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(tegra->state);
	}

	return 0;
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_atomic_helper_resume(drm, tegra->state);
	tegra_drm_fb_resume(drm);
	drm_kms_helper_poll_enable(drm);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

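/*
 * Device tree compatibles for all subdevices that can be part of the
 * logical DRM device; the host1x bus defers creating the device until
 * every subdevice listed in the device tree has been probed.
 */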
"nvidia,tegra124-vic", }, 1165 { .compatible = "nvidia,tegra132-dsi", }, 1166 { .compatible = "nvidia,tegra210-dc", }, 1167 { .compatible = "nvidia,tegra210-dsi", }, 1168 { .compatible = "nvidia,tegra210-sor", }, 1169 { .compatible = "nvidia,tegra210-sor1", }, 1170 { .compatible = "nvidia,tegra210-vic", }, 1171 { /* sentinel */ } 1172 }; 1173 1174 static struct host1x_driver host1x_drm_driver = { 1175 .driver = { 1176 .name = "drm", 1177 .pm = &host1x_drm_pm_ops, 1178 }, 1179 .probe = host1x_drm_probe, 1180 .remove = host1x_drm_remove, 1181 .subdevs = host1x_drm_subdevs, 1182 }; 1183 1184 static struct platform_driver * const drivers[] = { 1185 &tegra_dc_driver, 1186 &tegra_hdmi_driver, 1187 &tegra_dsi_driver, 1188 &tegra_dpaux_driver, 1189 &tegra_sor_driver, 1190 &tegra_gr2d_driver, 1191 &tegra_gr3d_driver, 1192 &tegra_vic_driver, 1193 }; 1194 1195 static int __init host1x_drm_init(void) 1196 { 1197 int err; 1198 1199 err = host1x_driver_register(&host1x_drm_driver); 1200 if (err < 0) 1201 return err; 1202 1203 err = platform_register_drivers(drivers, ARRAY_SIZE(drivers)); 1204 if (err < 0) 1205 goto unregister_host1x; 1206 1207 return 0; 1208 1209 unregister_host1x: 1210 host1x_driver_unregister(&host1x_drm_driver); 1211 return err; 1212 } 1213 module_init(host1x_drm_init); 1214 1215 static void __exit host1x_drm_exit(void) 1216 { 1217 platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); 1218 host1x_driver_unregister(&host1x_drm_driver); 1219 } 1220 module_exit(host1x_drm_exit); 1221 1222 MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); 1223 MODULE_DESCRIPTION("NVIDIA Tegra DRM driver"); 1224 MODULE_LICENSE("GPL v2"); 1225