/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M

struct tegra_drm_file {
        struct idr contexts;
        struct mutex lock;
};

static void tegra_atomic_schedule(struct tegra_drm *tegra,
                                  struct drm_atomic_state *state)
{
        tegra->commit.state = state;
        schedule_work(&tegra->commit.work);
}

static void tegra_atomic_complete(struct tegra_drm *tegra,
                                  struct drm_atomic_state *state)
{
        struct drm_device *drm = tegra->drm;

        /*
         * Everything below can be run asynchronously without the need to grab
         * any modeset locks at all under one condition: it must be guaranteed
         * that the asynchronous work has either been cancelled (if the driver
         * supports it, which at least requires that the framebuffers get
         * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
         * before the new state gets committed on the software side with
         * drm_atomic_helper_swap_state().
         *
         * This scheme allows new atomic state updates to be prepared and
         * checked in parallel to the asynchronous completion of the previous
         * update, which is important since compositors need to figure out the
         * composition of the next frame right after having submitted the
         * current layout.
         */

        drm_atomic_helper_commit_modeset_disables(drm, state);
        drm_atomic_helper_commit_modeset_enables(drm, state);
        drm_atomic_helper_commit_planes(drm, state,
                                        DRM_PLANE_COMMIT_ACTIVE_ONLY);

        drm_atomic_helper_wait_for_vblanks(drm, state);

        drm_atomic_helper_cleanup_planes(drm, state);
        drm_atomic_state_put(state);
}

static void tegra_atomic_work(struct work_struct *work)
{
        struct tegra_drm *tegra = container_of(work, struct tegra_drm,
                                               commit.work);

        tegra_atomic_complete(tegra, tegra->commit.state);
}

static int tegra_atomic_commit(struct drm_device *drm,
                               struct drm_atomic_state *state, bool nonblock)
{
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        err = drm_atomic_helper_prepare_planes(drm, state);
        if (err)
                return err;

        /* serialize outstanding nonblocking commits */
        mutex_lock(&tegra->commit.lock);
        flush_work(&tegra->commit.work);

        /*
         * This is the point of no return: nothing below can fail, short of
         * the hardware seriously misbehaving, which means the new state can
         * be committed on the software side now.
         */

        drm_atomic_helper_swap_state(state, true);

        drm_atomic_state_get(state);
        if (nonblock)
                tegra_atomic_schedule(tegra, state);
        else
                tegra_atomic_complete(tegra, state);

        mutex_unlock(&tegra->commit.lock);
        return 0;
}

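/*
 * Mode configuration callbacks: framebuffer creation and (for fbdev
 * emulation) output polling are handled by the driver, atomic state is
 * checked with the generic helper, and commits go through the
 * (non)blocking machinery implemented above.
 */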
static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
        .fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
        .output_poll_changed = tegra_fb_output_poll_changed,
#endif
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = tegra_atomic_commit,
};

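/*
 * Set up driver-private state when the DRM device is loaded: if an IOMMU
 * is available on the platform bus, carve the top CARVEOUT_SZ bytes of its
 * aperture out for kernel-internal allocations and hand the rest to drm_mm
 * for GEM, then initialize mode setting, fbdev emulation and vblank
 * handling.
 */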
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
        struct host1x_device *device = to_host1x_device(drm->dev);
        struct tegra_drm *tegra;
        int err;

        tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
        if (!tegra)
                return -ENOMEM;

        if (iommu_present(&platform_bus_type)) {
                u64 carveout_start, carveout_end, gem_start, gem_end;
                struct iommu_domain_geometry *geometry;
                unsigned long order;

                tegra->domain = iommu_domain_alloc(&platform_bus_type);
                if (!tegra->domain) {
                        err = -ENOMEM;
                        goto free;
                }

                geometry = &tegra->domain->geometry;
                gem_start = geometry->aperture_start;
                gem_end = geometry->aperture_end - CARVEOUT_SZ;
                carveout_start = gem_end + 1;
                carveout_end = geometry->aperture_end;

                order = __ffs(tegra->domain->pgsize_bitmap);
                init_iova_domain(&tegra->carveout.domain, 1UL << order,
                                 carveout_start >> order,
                                 carveout_end >> order);

                tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
                tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

                drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
                mutex_init(&tegra->mm_lock);

                DRM_DEBUG("IOMMU apertures:\n");
                DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
                DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
                          carveout_end);
        }

        mutex_init(&tegra->clients_lock);
        INIT_LIST_HEAD(&tegra->clients);

        mutex_init(&tegra->commit.lock);
        INIT_WORK(&tegra->commit.work, tegra_atomic_work);

        drm->dev_private = tegra;
        tegra->drm = drm;

        drm_mode_config_init(drm);

        drm->mode_config.min_width = 0;
        drm->mode_config.min_height = 0;

        drm->mode_config.max_width = 4096;
        drm->mode_config.max_height = 4096;

        drm->mode_config.allow_fb_modifiers = true;

        drm->mode_config.funcs = &tegra_drm_mode_funcs;

        err = tegra_drm_fb_prepare(drm);
        if (err < 0)
                goto config;

        drm_kms_helper_poll_init(drm);

        err = host1x_device_init(device);
        if (err < 0)
                goto fbdev;

        /*
         * We don't use the drm_irq_install() helpers provided by the DRM
         * core, so we need to set this manually in order to allow the
         * DRM_IOCTL_WAIT_VBLANK to operate correctly.
         */
        drm->irq_enabled = true;

        /* syncpoints are used for full 32-bit hardware VBLANK counters */
        drm->max_vblank_count = 0xffffffff;

        err = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (err < 0)
                goto device;

        drm_mode_config_reset(drm);

        err = tegra_drm_fb_init(drm);
        if (err < 0)
                goto vblank;

        return 0;

vblank:
        drm_vblank_cleanup(drm);
device:
        host1x_device_exit(device);
fbdev:
        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_free(drm);
config:
        drm_mode_config_cleanup(drm);

        if (tegra->domain) {
                iommu_domain_free(tegra->domain);
                drm_mm_takedown(&tegra->mm);
                mutex_destroy(&tegra->mm_lock);
                put_iova_domain(&tegra->carveout.domain);
        }
free:
        kfree(tegra);
        return err;
}

static void tegra_drm_unload(struct drm_device *drm)
{
        struct host1x_device *device = to_host1x_device(drm->dev);
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_exit(drm);
        drm_mode_config_cleanup(drm);
        drm_vblank_cleanup(drm);

        err = host1x_device_exit(device);
        if (err < 0)
                return;

        if (tegra->domain) {
                iommu_domain_free(tegra->domain);
                drm_mm_takedown(&tegra->mm);
                mutex_destroy(&tegra->mm_lock);
                put_iova_domain(&tegra->carveout.domain);
        }

        kfree(tegra);
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
        struct tegra_drm_file *fpriv;

        fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
        if (!fpriv)
                return -ENOMEM;

        idr_init(&fpriv->contexts);
        mutex_init(&fpriv->lock);
        filp->driver_priv = fpriv;

        return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
        context->client->ops->close_channel(context);
        kfree(context);
}

static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
        struct tegra_drm *tegra = drm->dev_private;

        tegra_fbdev_restore_mode(tegra->fbdev);
#endif
}

static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, handle);
        if (!gem)
                return NULL;

        drm_gem_object_unreference_unlocked(gem);

        bo = to_tegra_bo(gem);
        return &bo->base;
}

static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
                                       struct drm_tegra_reloc __user *src,
                                       struct drm_device *drm,
                                       struct drm_file *file)
{
        u32 cmdbuf, target;
        int err;

        err = get_user(cmdbuf, &src->cmdbuf.handle);
        if (err < 0)
                return err;

        err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
        if (err < 0)
                return err;

        err = get_user(target, &src->target.handle);
        if (err < 0)
                return err;

        err = get_user(dest->target.offset, &src->target.offset);
        if (err < 0)
                return err;

        err = get_user(dest->shift, &src->shift);
        if (err < 0)
                return err;

        dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
        if (!dest->cmdbuf.bo)
                return -ENOENT;

        dest->target.bo = host1x_bo_lookup(file, target);
        if (!dest->target.bo)
                return -ENOENT;

        return 0;
}

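/*
 * Turn a userspace submission into a host1x job: copy in the command
 * buffers, relocations and wait checks, pin the referenced buffer objects
 * and queue the job on the context's channel. On success, args->fence
 * holds the syncpoint threshold that marks completion of the job.
 */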
int tegra_drm_submit(struct tegra_drm_context *context,
                     struct drm_tegra_submit *args, struct drm_device *drm,
                     struct drm_file *file)
{
        unsigned int num_cmdbufs = args->num_cmdbufs;
        unsigned int num_relocs = args->num_relocs;
        unsigned int num_waitchks = args->num_waitchks;
        struct drm_tegra_cmdbuf __user *cmdbufs =
                (void __user *)(uintptr_t)args->cmdbufs;
        struct drm_tegra_reloc __user *relocs =
                (void __user *)(uintptr_t)args->relocs;
        struct drm_tegra_waitchk __user *waitchks =
                (void __user *)(uintptr_t)args->waitchks;
        struct drm_tegra_syncpt syncpt;
        struct host1x_job *job;
        int err;

        /* We currently support only a single syncpoint increment per submission. */
        if (args->num_syncpts != 1)
                return -EINVAL;

        job = host1x_job_alloc(context->channel, args->num_cmdbufs,
                               args->num_relocs, args->num_waitchks);
        if (!job)
                return -ENOMEM;

        job->num_relocs = args->num_relocs;
        job->num_waitchk = args->num_waitchks;
        job->client = (u32)args->context;
        job->class = context->client->base.class;
        job->serialize = true;

        while (num_cmdbufs) {
                struct drm_tegra_cmdbuf cmdbuf;
                struct host1x_bo *bo;

                if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
                        err = -EFAULT;
                        goto fail;
                }

                bo = host1x_bo_lookup(file, cmdbuf.handle);
                if (!bo) {
                        err = -ENOENT;
                        goto fail;
                }

                host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
                num_cmdbufs--;
                cmdbufs++;
        }

        /* copy and resolve relocations from submit */
        while (num_relocs--) {
                err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
                                                  &relocs[num_relocs], drm,
                                                  file);
                if (err < 0)
                        goto fail;
        }

        if (copy_from_user(job->waitchk, waitchks,
                           sizeof(*waitchks) * num_waitchks)) {
                err = -EFAULT;
                goto fail;
        }

        if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
                           sizeof(syncpt))) {
                err = -EFAULT;
                goto fail;
        }

        job->is_addr_reg = context->client->ops->is_addr_reg;
        job->syncpt_incrs = syncpt.incrs;
        job->syncpt_id = syncpt.id;
        job->timeout = 10000;

        if (args->timeout && args->timeout < 10000)
                job->timeout = args->timeout;

        err = host1x_job_pin(job, context->client->base.dev);
        if (err)
                goto fail;

        err = host1x_job_submit(job);
        if (err)
                goto fail_submit;

        args->fence = job->syncpt_end;

        host1x_job_put(job);
        return 0;

fail_submit:
        host1x_job_unpin(job);
fail:
        host1x_job_put(job);
        return err;
}

#ifdef CONFIG_DRM_TEGRA_STAGING
static struct tegra_drm_context *
tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
{
        struct tegra_drm_context *context;

        mutex_lock(&file->lock);
        context = idr_find(&file->contexts, id);
        mutex_unlock(&file->lock);

        return context;
}

static int tegra_gem_create(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct drm_tegra_gem_create *args = data;
        struct tegra_bo *bo;

        bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
                          struct drm_file *file)
{
        struct drm_tegra_gem_mmap *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -EINVAL;

        bo = to_tegra_bo(gem);

        args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

        drm_gem_object_unreference_unlocked(gem);

        return 0;
}

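/*
 * Syncpoint IOCTLs: read the current (minimum) value of a syncpoint,
 * increment it from the CPU, or wait for it to reach a threshold.
 */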
static int tegra_syncpt_read(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_read *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host, args->id);
        if (!sp)
                return -EINVAL;

        args->value = host1x_syncpt_read_min(sp);
        return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_incr *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_wait *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_wait(sp, args->thresh, args->timeout,
                                  &args->value);
}

static int tegra_client_open(struct tegra_drm_file *fpriv,
                             struct tegra_drm_client *client,
                             struct tegra_drm_context *context)
{
        int err;

        err = client->ops->open_channel(client, context);
        if (err < 0)
                return err;

        err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL);
        if (err < 0) {
                client->ops->close_channel(context);
                return err;
        }

        context->client = client;
        context->id = err;

        return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
                              struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_tegra_open_channel *args = data;
        struct tegra_drm_context *context;
        struct tegra_drm_client *client;
        int err = -ENODEV;

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        mutex_lock(&fpriv->lock);

        list_for_each_entry(client, &tegra->clients, list)
                if (client->base.class == args->client) {
                        err = tegra_client_open(fpriv, client, context);
                        if (err < 0)
                                break;

                        args->context = context->id;
                        break;
                }

        if (err < 0)
                kfree(context);

        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_close_channel *args = data;
        struct tegra_drm_context *context;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = tegra_drm_file_get_context(fpriv, args->context);
        if (!context) {
                err = -EINVAL;
                goto unlock;
        }

        idr_remove(&fpriv->contexts, context->id);
        tegra_drm_context_free(context);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

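/*
 * Resolve a (context, index) pair to the global ID of one of the hardware
 * syncpoints owned by the context's client.
 */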
static int tegra_get_syncpt(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = tegra_drm_file_get_context(fpriv, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->index >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->index];
        args->id = host1x_syncpt_id(syncpt);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
                        struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_submit *args = data;
        struct tegra_drm_context *context;
        int err;

        mutex_lock(&fpriv->lock);

        context = tegra_drm_file_get_context(fpriv, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        err = context->client->ops->submit(context, args, drm, file);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
                                 struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt_base *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt_base *base;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = tegra_drm_file_get_context(fpriv, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->syncpt >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->syncpt];

        base = host1x_syncpt_get_base(syncpt);
        if (!base) {
                err = -ENXIO;
                goto unlock;
        }

        args->id = host1x_syncpt_base_id(base);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

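/*
 * Tiling IOCTLs: the PITCH and TILED modes take no argument, while the
 * BLOCK mode takes a block height value in the range 0-5.
 */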
static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_set_tiling *args = data;
        enum tegra_bo_tiling_mode mode;
        struct drm_gem_object *gem;
        unsigned long value = 0;
        struct tegra_bo *bo;

        switch (args->mode) {
        case DRM_TEGRA_GEM_TILING_MODE_PITCH:
                mode = TEGRA_BO_TILING_MODE_PITCH;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_TILED:
                mode = TEGRA_BO_TILING_MODE_TILED;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
                mode = TEGRA_BO_TILING_MODE_BLOCK;

                if (args->value > 5)
                        return -EINVAL;

                value = args->value;
                break;

        default:
                return -EINVAL;
        }

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        bo->tiling.mode = mode;
        bo->tiling.value = value;

        drm_gem_object_unreference_unlocked(gem);

        return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_get_tiling *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;
        int err = 0;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        switch (bo->tiling.mode) {
        case TEGRA_BO_TILING_MODE_PITCH:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_TILED:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_BLOCK:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
                args->value = bo->tiling.value;
                break;

        default:
                err = -EINVAL;
                break;
        }

        drm_gem_object_unreference_unlocked(gem);

        return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_set_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
                return -EINVAL;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);
        bo->flags = 0;

        if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        drm_gem_object_unreference_unlocked(gem);

        return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_get_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);
        args->flags = 0;

        if (bo->flags & TEGRA_BO_BOTTOM_UP)
                args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

        drm_gem_object_unreference_unlocked(gem);

        return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, 0),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, 0),
#endif
};

static const struct file_operations tegra_drm_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = tegra_drm_mmap,
        .poll = drm_poll,
        .read = drm_read,
        .compat_ioctl = drm_compat_ioctl,
        .llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
        struct tegra_drm_context *context = p;

        tegra_drm_context_free(context);

        return 0;
}

static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;

        mutex_lock(&fpriv->lock);
        idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
        mutex_unlock(&fpriv->lock);

        idr_destroy(&fpriv->contexts);
        mutex_destroy(&fpriv->lock);
        kfree(fpriv);
}

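/*
 * debugfs support: one file lists all currently allocated framebuffers,
 * another dumps the state of the GEM IOVA space.
 */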
#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct drm_framebuffer *fb;

        mutex_lock(&drm->mode_config.fb_lock);

        list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
                seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
                           fb->base.id, fb->width, fb->height,
                           fb->format->depth,
                           fb->format->cpp[0] * 8,
                           drm_framebuffer_read_refcount(fb));
        }

        mutex_unlock(&drm->mode_config.fb_lock);

        return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_printer p = drm_seq_file_printer(s);

        mutex_lock(&tegra->mm_lock);
        drm_mm_print(&tegra->mm, &p);
        mutex_unlock(&tegra->mm_lock);

        return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
        { "framebuffers", tegra_debugfs_framebuffers, 0 },
        { "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
        return drm_debugfs_create_files(tegra_debugfs_list,
                                        ARRAY_SIZE(tegra_debugfs_list),
                                        minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
                           DRIVER_ATOMIC,
        .load = tegra_drm_load,
        .unload = tegra_drm_unload,
        .open = tegra_drm_open,
        .preclose = tegra_drm_preclose,
        .lastclose = tegra_drm_lastclose,

#if defined(CONFIG_DEBUG_FS)
        .debugfs_init = tegra_debugfs_init,
#endif

        .gem_free_object_unlocked = tegra_bo_free_object,
        .gem_vm_ops = &tegra_bo_vm_ops,

        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = tegra_gem_prime_export,
        .gem_prime_import = tegra_gem_prime_import,

        .dumb_create = tegra_bo_dumb_create,
        .dumb_map_offset = tegra_bo_dumb_map_offset,
        .dumb_destroy = drm_gem_dumb_destroy,

        .ioctls = tegra_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
        .fops = &tegra_drm_fops,

        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
                              struct tegra_drm_client *client)
{
        mutex_lock(&tegra->clients_lock);
        list_add_tail(&client->list, &tegra->clients);
        mutex_unlock(&tegra->clients_lock);

        return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
                                struct tegra_drm_client *client)
{
        mutex_lock(&tegra->clients_lock);
        list_del_init(&client->list);
        mutex_unlock(&tegra->clients_lock);

        return 0;
}

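/*
 * Allocate memory for kernel-internal use that the hardware can access:
 * pages come from the page allocator and, if an IOMMU domain exists, are
 * mapped through the carveout IOVA range. Without an IOMMU the physical
 * address is used directly and the allocation is forced into the lower
 * 32-bit range, since many units only support 32-bit addresses.
 */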
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
                      dma_addr_t *dma)
{
        struct iova *alloc;
        void *virt;
        gfp_t gfp;
        int err;

        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        gfp = GFP_KERNEL | __GFP_ZERO;
        if (!tegra->domain) {
                /*
                 * Many units only support 32-bit addresses, even on 64-bit
                 * SoCs. If there is no IOMMU to translate into a 32-bit IO
                 * virtual address space, force allocations to be in the
                 * lower 32-bit range.
                 */
                gfp |= GFP_DMA;
        }

        virt = (void *)__get_free_pages(gfp, get_order(size));
        if (!virt)
                return ERR_PTR(-ENOMEM);

        if (!tegra->domain) {
                /*
                 * If IOMMU is disabled, devices address physical memory
                 * directly.
                 */
                *dma = virt_to_phys(virt);
                return virt;
        }

        alloc = alloc_iova(&tegra->carveout.domain,
                           size >> tegra->carveout.shift,
                           tegra->carveout.limit, true);
        if (!alloc) {
                err = -EBUSY;
                goto free_pages;
        }

        *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
        err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
                        size, IOMMU_READ | IOMMU_WRITE);
        if (err < 0)
                goto free_iova;

        return virt;

free_iova:
        __free_iova(&tegra->carveout.domain, alloc);
free_pages:
        free_pages((unsigned long)virt, get_order(size));

        return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
                    dma_addr_t dma)
{
        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        if (tegra->domain) {
                iommu_unmap(tegra->domain, dma, size);
                free_iova(&tegra->carveout.domain,
                          iova_pfn(&tegra->carveout.domain, dma));
        }

        free_pages((unsigned long)virt, get_order(size));
}

static int host1x_drm_probe(struct host1x_device *dev)
{
        struct drm_driver *driver = &tegra_drm_driver;
        struct drm_device *drm;
        int err;

        drm = drm_dev_alloc(driver, &dev->dev);
        if (IS_ERR(drm))
                return PTR_ERR(drm);

        dev_set_drvdata(&dev->dev, drm);

        err = drm_dev_register(drm, 0);
        if (err < 0)
                goto unref;

        return 0;

unref:
        drm_dev_unref(drm);
        return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
        struct drm_device *drm = dev_get_drvdata(&dev->dev);

        drm_dev_unregister(drm);
        drm_dev_unref(drm);

        return 0;
}

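/*
 * System sleep: suspend disables output polling and fbdev before saving
 * the atomic state; resume restores that state and re-enables them in
 * reverse order.
 */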
#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct tegra_drm *tegra = drm->dev_private;

        drm_kms_helper_poll_disable(drm);
        tegra_drm_fb_suspend(drm);

        tegra->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(tegra->state)) {
                tegra_drm_fb_resume(drm);
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(tegra->state);
        }

        return 0;
}

static int host1x_drm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct tegra_drm *tegra = drm->dev_private;

        drm_atomic_helper_resume(drm, tegra->state);
        tegra_drm_fb_resume(drm);
        drm_kms_helper_poll_enable(drm);

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
                         host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
        { .compatible = "nvidia,tegra20-dc", },
        { .compatible = "nvidia,tegra20-hdmi", },
        { .compatible = "nvidia,tegra20-gr2d", },
        { .compatible = "nvidia,tegra20-gr3d", },
        { .compatible = "nvidia,tegra30-dc", },
        { .compatible = "nvidia,tegra30-hdmi", },
        { .compatible = "nvidia,tegra30-gr2d", },
        { .compatible = "nvidia,tegra30-gr3d", },
        { .compatible = "nvidia,tegra114-dsi", },
        { .compatible = "nvidia,tegra114-hdmi", },
        { .compatible = "nvidia,tegra114-gr3d", },
        { .compatible = "nvidia,tegra124-dc", },
        { .compatible = "nvidia,tegra124-sor", },
        { .compatible = "nvidia,tegra124-hdmi", },
        { .compatible = "nvidia,tegra124-dsi", },
        { .compatible = "nvidia,tegra124-vic", },
        { .compatible = "nvidia,tegra132-dsi", },
        { .compatible = "nvidia,tegra210-dc", },
        { .compatible = "nvidia,tegra210-dsi", },
        { .compatible = "nvidia,tegra210-sor", },
        { .compatible = "nvidia,tegra210-sor1", },
        { .compatible = "nvidia,tegra210-vic", },
        { /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
        .driver = {
                .name = "drm",
                .pm = &host1x_drm_pm_ops,
        },
        .probe = host1x_drm_probe,
        .remove = host1x_drm_remove,
        .subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
        &tegra_dc_driver,
        &tegra_hdmi_driver,
        &tegra_dsi_driver,
        &tegra_dpaux_driver,
        &tegra_sor_driver,
        &tegra_gr2d_driver,
        &tegra_gr3d_driver,
        &tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
        int err;

        err = host1x_driver_register(&host1x_drm_driver);
        if (err < 0)
                return err;

        err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (err < 0)
                goto unregister_host1x;

        return 0;

unregister_host1x:
        host1x_driver_unregister(&host1x_drm_driver);
        return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
        host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");