1 /* 2 * Copyright (C) 2012 Avionic Design GmbH 3 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved. 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 as 7 * published by the Free Software Foundation. 8 */ 9 10 #include <linux/bitops.h> 11 #include <linux/host1x.h> 12 #include <linux/idr.h> 13 #include <linux/iommu.h> 14 15 #include <drm/drm_atomic.h> 16 #include <drm/drm_atomic_helper.h> 17 18 #include "drm.h" 19 #include "gem.h" 20 21 #define DRIVER_NAME "tegra" 22 #define DRIVER_DESC "NVIDIA Tegra graphics" 23 #define DRIVER_DATE "20120330" 24 #define DRIVER_MAJOR 0 25 #define DRIVER_MINOR 0 26 #define DRIVER_PATCHLEVEL 0 27 28 #define CARVEOUT_SZ SZ_64M 29 #define CDMA_GATHER_FETCHES_MAX_NB 16383 30 31 struct tegra_drm_file { 32 struct idr contexts; 33 struct mutex lock; 34 }; 35 36 static int tegra_atomic_check(struct drm_device *drm, 37 struct drm_atomic_state *state) 38 { 39 int err; 40 41 err = drm_atomic_helper_check(drm, state); 42 if (err < 0) 43 return err; 44 45 return tegra_display_hub_atomic_check(drm, state); 46 } 47 48 static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = { 49 .fb_create = tegra_fb_create, 50 #ifdef CONFIG_DRM_FBDEV_EMULATION 51 .output_poll_changed = drm_fb_helper_output_poll_changed, 52 #endif 53 .atomic_check = tegra_atomic_check, 54 .atomic_commit = drm_atomic_helper_commit, 55 }; 56 57 static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state) 58 { 59 struct drm_device *drm = old_state->dev; 60 struct tegra_drm *tegra = drm->dev_private; 61 62 if (tegra->hub) { 63 drm_atomic_helper_commit_modeset_disables(drm, old_state); 64 tegra_display_hub_atomic_commit(drm, old_state); 65 drm_atomic_helper_commit_planes(drm, old_state, 0); 66 drm_atomic_helper_commit_modeset_enables(drm, old_state); 67 drm_atomic_helper_commit_hw_done(old_state); 68 drm_atomic_helper_wait_for_vblanks(drm, old_state); 69 drm_atomic_helper_cleanup_planes(drm, old_state); 70 } else { 71 drm_atomic_helper_commit_tail_rpm(old_state); 72 } 73 } 74 75 static const struct drm_mode_config_helper_funcs 76 tegra_drm_mode_config_helpers = { 77 .atomic_commit_tail = tegra_atomic_commit_tail, 78 }; 79 80 static int tegra_drm_load(struct drm_device *drm, unsigned long flags) 81 { 82 struct host1x_device *device = to_host1x_device(drm->dev); 83 struct tegra_drm *tegra; 84 int err; 85 86 tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); 87 if (!tegra) 88 return -ENOMEM; 89 90 if (iommu_present(&platform_bus_type)) { 91 u64 carveout_start, carveout_end, gem_start, gem_end; 92 struct iommu_domain_geometry *geometry; 93 unsigned long order; 94 95 tegra->domain = iommu_domain_alloc(&platform_bus_type); 96 if (!tegra->domain) { 97 err = -ENOMEM; 98 goto free; 99 } 100 101 err = iova_cache_get(); 102 if (err < 0) 103 goto domain; 104 105 geometry = &tegra->domain->geometry; 106 gem_start = geometry->aperture_start; 107 gem_end = geometry->aperture_end - CARVEOUT_SZ; 108 carveout_start = gem_end + 1; 109 carveout_end = geometry->aperture_end; 110 111 order = __ffs(tegra->domain->pgsize_bitmap); 112 init_iova_domain(&tegra->carveout.domain, 1UL << order, 113 carveout_start >> order); 114 115 tegra->carveout.shift = iova_shift(&tegra->carveout.domain); 116 tegra->carveout.limit = carveout_end >> tegra->carveout.shift; 117 118 drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1); 119 mutex_init(&tegra->mm_lock); 120 121 DRM_DEBUG("IOMMU apertures:\n"); 122 DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end); 123 DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start, 124 carveout_end); 125 } 126 127 mutex_init(&tegra->clients_lock); 128 INIT_LIST_HEAD(&tegra->clients); 129 130 drm->dev_private = tegra; 131 tegra->drm = drm; 132 133 drm_mode_config_init(drm); 134 135 drm->mode_config.min_width = 0; 136 drm->mode_config.min_height = 0; 137 138 drm->mode_config.max_width = 4096; 139 drm->mode_config.max_height = 4096; 140 141 drm->mode_config.allow_fb_modifiers = true; 142 143 drm->mode_config.normalize_zpos = true; 144 145 drm->mode_config.funcs = &tegra_drm_mode_config_funcs; 146 drm->mode_config.helper_private = &tegra_drm_mode_config_helpers; 147 148 err = tegra_drm_fb_prepare(drm); 149 if (err < 0) 150 goto config; 151 152 drm_kms_helper_poll_init(drm); 153 154 err = host1x_device_init(device); 155 if (err < 0) 156 goto fbdev; 157 158 if (tegra->hub) { 159 err = tegra_display_hub_prepare(tegra->hub); 160 if (err < 0) 161 goto device; 162 } 163 164 /* 165 * We don't use the drm_irq_install() helpers provided by the DRM 166 * core, so we need to set this manually in order to allow the 167 * DRM_IOCTL_WAIT_VBLANK to operate correctly. 168 */ 169 drm->irq_enabled = true; 170 171 /* syncpoints are used for full 32-bit hardware VBLANK counters */ 172 drm->max_vblank_count = 0xffffffff; 173 174 err = drm_vblank_init(drm, drm->mode_config.num_crtc); 175 if (err < 0) 176 goto hub; 177 178 drm_mode_config_reset(drm); 179 180 err = tegra_drm_fb_init(drm); 181 if (err < 0) 182 goto hub; 183 184 return 0; 185 186 hub: 187 if (tegra->hub) 188 tegra_display_hub_cleanup(tegra->hub); 189 device: 190 host1x_device_exit(device); 191 fbdev: 192 drm_kms_helper_poll_fini(drm); 193 tegra_drm_fb_free(drm); 194 config: 195 drm_mode_config_cleanup(drm); 196 197 if (tegra->domain) { 198 mutex_destroy(&tegra->mm_lock); 199 drm_mm_takedown(&tegra->mm); 200 put_iova_domain(&tegra->carveout.domain); 201 iova_cache_put(); 202 } 203 domain: 204 if (tegra->domain) 205 iommu_domain_free(tegra->domain); 206 free: 207 kfree(tegra); 208 return err; 209 } 210 211 static void tegra_drm_unload(struct drm_device *drm) 212 { 213 struct host1x_device *device = to_host1x_device(drm->dev); 214 struct tegra_drm *tegra = drm->dev_private; 215 int err; 216 217 drm_kms_helper_poll_fini(drm); 218 tegra_drm_fb_exit(drm); 219 drm_atomic_helper_shutdown(drm); 220 drm_mode_config_cleanup(drm); 221 222 err = host1x_device_exit(device); 223 if (err < 0) 224 return; 225 226 if (tegra->domain) { 227 mutex_destroy(&tegra->mm_lock); 228 drm_mm_takedown(&tegra->mm); 229 put_iova_domain(&tegra->carveout.domain); 230 iova_cache_put(); 231 iommu_domain_free(tegra->domain); 232 } 233 234 kfree(tegra); 235 } 236 237 static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) 238 { 239 struct tegra_drm_file *fpriv; 240 241 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); 242 if (!fpriv) 243 return -ENOMEM; 244 245 idr_init(&fpriv->contexts); 246 mutex_init(&fpriv->lock); 247 filp->driver_priv = fpriv; 248 249 return 0; 250 } 251 252 static void tegra_drm_context_free(struct tegra_drm_context *context) 253 { 254 context->client->ops->close_channel(context); 255 kfree(context); 256 } 257 258 static struct host1x_bo * 259 host1x_bo_lookup(struct drm_file *file, u32 handle) 260 { 261 struct drm_gem_object *gem; 262 struct tegra_bo *bo; 263 264 gem = drm_gem_object_lookup(file, handle); 265 if (!gem) 266 return NULL; 267 268 bo = to_tegra_bo(gem); 269 return &bo->base; 270 } 271 272 static int host1x_reloc_copy_from_user(struct host1x_reloc *dest, 273 struct drm_tegra_reloc __user *src, 274 struct drm_device *drm, 275 struct drm_file *file) 276 { 277 u32 cmdbuf, target; 278 int err; 279 280 err = get_user(cmdbuf, &src->cmdbuf.handle); 281 if (err < 0) 282 return err; 283 284 err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset); 285 if (err < 0) 286 return err; 287 288 err = get_user(target, &src->target.handle); 289 if (err < 0) 290 return err; 291 292 err = get_user(dest->target.offset, &src->target.offset); 293 if (err < 0) 294 return err; 295 296 err = get_user(dest->shift, &src->shift); 297 if (err < 0) 298 return err; 299 300 dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf); 301 if (!dest->cmdbuf.bo) 302 return -ENOENT; 303 304 dest->target.bo = host1x_bo_lookup(file, target); 305 if (!dest->target.bo) 306 return -ENOENT; 307 308 return 0; 309 } 310 311 int tegra_drm_submit(struct tegra_drm_context *context, 312 struct drm_tegra_submit *args, struct drm_device *drm, 313 struct drm_file *file) 314 { 315 struct host1x_client *client = &context->client->base; 316 unsigned int num_cmdbufs = args->num_cmdbufs; 317 unsigned int num_relocs = args->num_relocs; 318 struct drm_tegra_cmdbuf __user *user_cmdbufs; 319 struct drm_tegra_reloc __user *user_relocs; 320 struct drm_tegra_syncpt __user *user_syncpt; 321 struct drm_tegra_syncpt syncpt; 322 struct host1x *host1x = dev_get_drvdata(drm->dev->parent); 323 struct drm_gem_object **refs; 324 struct host1x_syncpt *sp; 325 struct host1x_job *job; 326 unsigned int num_refs; 327 int err; 328 329 user_cmdbufs = u64_to_user_ptr(args->cmdbufs); 330 user_relocs = u64_to_user_ptr(args->relocs); 331 user_syncpt = u64_to_user_ptr(args->syncpts); 332 333 /* We don't yet support other than one syncpt_incr struct per submit */ 334 if (args->num_syncpts != 1) 335 return -EINVAL; 336 337 /* We don't yet support waitchks */ 338 if (args->num_waitchks != 0) 339 return -EINVAL; 340 341 job = host1x_job_alloc(context->channel, args->num_cmdbufs, 342 args->num_relocs); 343 if (!job) 344 return -ENOMEM; 345 346 job->num_relocs = args->num_relocs; 347 job->client = client; 348 job->class = client->class; 349 job->serialize = true; 350 351 /* 352 * Track referenced BOs so that they can be unreferenced after the 353 * submission is complete. 354 */ 355 num_refs = num_cmdbufs + num_relocs * 2; 356 357 refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL); 358 if (!refs) { 359 err = -ENOMEM; 360 goto put; 361 } 362 363 /* reuse as an iterator later */ 364 num_refs = 0; 365 366 while (num_cmdbufs) { 367 struct drm_tegra_cmdbuf cmdbuf; 368 struct host1x_bo *bo; 369 struct tegra_bo *obj; 370 u64 offset; 371 372 if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) { 373 err = -EFAULT; 374 goto fail; 375 } 376 377 /* 378 * The maximum number of CDMA gather fetches is 16383, a higher 379 * value means the words count is malformed. 380 */ 381 if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) { 382 err = -EINVAL; 383 goto fail; 384 } 385 386 bo = host1x_bo_lookup(file, cmdbuf.handle); 387 if (!bo) { 388 err = -ENOENT; 389 goto fail; 390 } 391 392 offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32); 393 obj = host1x_to_tegra_bo(bo); 394 refs[num_refs++] = &obj->gem; 395 396 /* 397 * Gather buffer base address must be 4-bytes aligned, 398 * unaligned offset is malformed and cause commands stream 399 * corruption on the buffer address relocation. 400 */ 401 if (offset & 3 || offset >= obj->gem.size) { 402 err = -EINVAL; 403 goto fail; 404 } 405 406 host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset); 407 num_cmdbufs--; 408 user_cmdbufs++; 409 } 410 411 /* copy and resolve relocations from submit */ 412 while (num_relocs--) { 413 struct host1x_reloc *reloc; 414 struct tegra_bo *obj; 415 416 err = host1x_reloc_copy_from_user(&job->relocs[num_relocs], 417 &user_relocs[num_relocs], drm, 418 file); 419 if (err < 0) 420 goto fail; 421 422 reloc = &job->relocs[num_relocs]; 423 obj = host1x_to_tegra_bo(reloc->cmdbuf.bo); 424 refs[num_refs++] = &obj->gem; 425 426 /* 427 * The unaligned cmdbuf offset will cause an unaligned write 428 * during of the relocations patching, corrupting the commands 429 * stream. 430 */ 431 if (reloc->cmdbuf.offset & 3 || 432 reloc->cmdbuf.offset >= obj->gem.size) { 433 err = -EINVAL; 434 goto fail; 435 } 436 437 obj = host1x_to_tegra_bo(reloc->target.bo); 438 refs[num_refs++] = &obj->gem; 439 440 if (reloc->target.offset >= obj->gem.size) { 441 err = -EINVAL; 442 goto fail; 443 } 444 } 445 446 if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) { 447 err = -EFAULT; 448 goto fail; 449 } 450 451 /* check whether syncpoint ID is valid */ 452 sp = host1x_syncpt_get(host1x, syncpt.id); 453 if (!sp) { 454 err = -ENOENT; 455 goto fail; 456 } 457 458 job->is_addr_reg = context->client->ops->is_addr_reg; 459 job->is_valid_class = context->client->ops->is_valid_class; 460 job->syncpt_incrs = syncpt.incrs; 461 job->syncpt_id = syncpt.id; 462 job->timeout = 10000; 463 464 if (args->timeout && args->timeout < 10000) 465 job->timeout = args->timeout; 466 467 err = host1x_job_pin(job, context->client->base.dev); 468 if (err) 469 goto fail; 470 471 err = host1x_job_submit(job); 472 if (err) { 473 host1x_job_unpin(job); 474 goto fail; 475 } 476 477 args->fence = job->syncpt_end; 478 479 fail: 480 while (num_refs--) 481 drm_gem_object_put_unlocked(refs[num_refs]); 482 483 kfree(refs); 484 485 put: 486 host1x_job_put(job); 487 return err; 488 } 489 490 491 #ifdef CONFIG_DRM_TEGRA_STAGING 492 static int tegra_gem_create(struct drm_device *drm, void *data, 493 struct drm_file *file) 494 { 495 struct drm_tegra_gem_create *args = data; 496 struct tegra_bo *bo; 497 498 bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags, 499 &args->handle); 500 if (IS_ERR(bo)) 501 return PTR_ERR(bo); 502 503 return 0; 504 } 505 506 static int tegra_gem_mmap(struct drm_device *drm, void *data, 507 struct drm_file *file) 508 { 509 struct drm_tegra_gem_mmap *args = data; 510 struct drm_gem_object *gem; 511 struct tegra_bo *bo; 512 513 gem = drm_gem_object_lookup(file, args->handle); 514 if (!gem) 515 return -EINVAL; 516 517 bo = to_tegra_bo(gem); 518 519 args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node); 520 521 drm_gem_object_put_unlocked(gem); 522 523 return 0; 524 } 525 526 static int tegra_syncpt_read(struct drm_device *drm, void *data, 527 struct drm_file *file) 528 { 529 struct host1x *host = dev_get_drvdata(drm->dev->parent); 530 struct drm_tegra_syncpt_read *args = data; 531 struct host1x_syncpt *sp; 532 533 sp = host1x_syncpt_get(host, args->id); 534 if (!sp) 535 return -EINVAL; 536 537 args->value = host1x_syncpt_read_min(sp); 538 return 0; 539 } 540 541 static int tegra_syncpt_incr(struct drm_device *drm, void *data, 542 struct drm_file *file) 543 { 544 struct host1x *host1x = dev_get_drvdata(drm->dev->parent); 545 struct drm_tegra_syncpt_incr *args = data; 546 struct host1x_syncpt *sp; 547 548 sp = host1x_syncpt_get(host1x, args->id); 549 if (!sp) 550 return -EINVAL; 551 552 return host1x_syncpt_incr(sp); 553 } 554 555 static int tegra_syncpt_wait(struct drm_device *drm, void *data, 556 struct drm_file *file) 557 { 558 struct host1x *host1x = dev_get_drvdata(drm->dev->parent); 559 struct drm_tegra_syncpt_wait *args = data; 560 struct host1x_syncpt *sp; 561 562 sp = host1x_syncpt_get(host1x, args->id); 563 if (!sp) 564 return -EINVAL; 565 566 return host1x_syncpt_wait(sp, args->thresh, 567 msecs_to_jiffies(args->timeout), 568 &args->value); 569 } 570 571 static int tegra_client_open(struct tegra_drm_file *fpriv, 572 struct tegra_drm_client *client, 573 struct tegra_drm_context *context) 574 { 575 int err; 576 577 err = client->ops->open_channel(client, context); 578 if (err < 0) 579 return err; 580 581 err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL); 582 if (err < 0) { 583 client->ops->close_channel(context); 584 return err; 585 } 586 587 context->client = client; 588 context->id = err; 589 590 return 0; 591 } 592 593 static int tegra_open_channel(struct drm_device *drm, void *data, 594 struct drm_file *file) 595 { 596 struct tegra_drm_file *fpriv = file->driver_priv; 597 struct tegra_drm *tegra = drm->dev_private; 598 struct drm_tegra_open_channel *args = data; 599 struct tegra_drm_context *context; 600 struct tegra_drm_client *client; 601 int err = -ENODEV; 602 603 context = kzalloc(sizeof(*context), GFP_KERNEL); 604 if (!context) 605 return -ENOMEM; 606 607 mutex_lock(&fpriv->lock); 608 609 list_for_each_entry(client, &tegra->clients, list) 610 if (client->base.class == args->client) { 611 err = tegra_client_open(fpriv, client, context); 612 if (err < 0) 613 break; 614 615 args->context = context->id; 616 break; 617 } 618 619 if (err < 0) 620 kfree(context); 621 622 mutex_unlock(&fpriv->lock); 623 return err; 624 } 625 626 static int tegra_close_channel(struct drm_device *drm, void *data, 627 struct drm_file *file) 628 { 629 struct tegra_drm_file *fpriv = file->driver_priv; 630 struct drm_tegra_close_channel *args = data; 631 struct tegra_drm_context *context; 632 int err = 0; 633 634 mutex_lock(&fpriv->lock); 635 636 context = idr_find(&fpriv->contexts, args->context); 637 if (!context) { 638 err = -EINVAL; 639 goto unlock; 640 } 641 642 idr_remove(&fpriv->contexts, context->id); 643 tegra_drm_context_free(context); 644 645 unlock: 646 mutex_unlock(&fpriv->lock); 647 return err; 648 } 649 650 static int tegra_get_syncpt(struct drm_device *drm, void *data, 651 struct drm_file *file) 652 { 653 struct tegra_drm_file *fpriv = file->driver_priv; 654 struct drm_tegra_get_syncpt *args = data; 655 struct tegra_drm_context *context; 656 struct host1x_syncpt *syncpt; 657 int err = 0; 658 659 mutex_lock(&fpriv->lock); 660 661 context = idr_find(&fpriv->contexts, args->context); 662 if (!context) { 663 err = -ENODEV; 664 goto unlock; 665 } 666 667 if (args->index >= context->client->base.num_syncpts) { 668 err = -EINVAL; 669 goto unlock; 670 } 671 672 syncpt = context->client->base.syncpts[args->index]; 673 args->id = host1x_syncpt_id(syncpt); 674 675 unlock: 676 mutex_unlock(&fpriv->lock); 677 return err; 678 } 679 680 static int tegra_submit(struct drm_device *drm, void *data, 681 struct drm_file *file) 682 { 683 struct tegra_drm_file *fpriv = file->driver_priv; 684 struct drm_tegra_submit *args = data; 685 struct tegra_drm_context *context; 686 int err; 687 688 mutex_lock(&fpriv->lock); 689 690 context = idr_find(&fpriv->contexts, args->context); 691 if (!context) { 692 err = -ENODEV; 693 goto unlock; 694 } 695 696 err = context->client->ops->submit(context, args, drm, file); 697 698 unlock: 699 mutex_unlock(&fpriv->lock); 700 return err; 701 } 702 703 static int tegra_get_syncpt_base(struct drm_device *drm, void *data, 704 struct drm_file *file) 705 { 706 struct tegra_drm_file *fpriv = file->driver_priv; 707 struct drm_tegra_get_syncpt_base *args = data; 708 struct tegra_drm_context *context; 709 struct host1x_syncpt_base *base; 710 struct host1x_syncpt *syncpt; 711 int err = 0; 712 713 mutex_lock(&fpriv->lock); 714 715 context = idr_find(&fpriv->contexts, args->context); 716 if (!context) { 717 err = -ENODEV; 718 goto unlock; 719 } 720 721 if (args->syncpt >= context->client->base.num_syncpts) { 722 err = -EINVAL; 723 goto unlock; 724 } 725 726 syncpt = context->client->base.syncpts[args->syncpt]; 727 728 base = host1x_syncpt_get_base(syncpt); 729 if (!base) { 730 err = -ENXIO; 731 goto unlock; 732 } 733 734 args->id = host1x_syncpt_base_id(base); 735 736 unlock: 737 mutex_unlock(&fpriv->lock); 738 return err; 739 } 740 741 static int tegra_gem_set_tiling(struct drm_device *drm, void *data, 742 struct drm_file *file) 743 { 744 struct drm_tegra_gem_set_tiling *args = data; 745 enum tegra_bo_tiling_mode mode; 746 struct drm_gem_object *gem; 747 unsigned long value = 0; 748 struct tegra_bo *bo; 749 750 switch (args->mode) { 751 case DRM_TEGRA_GEM_TILING_MODE_PITCH: 752 mode = TEGRA_BO_TILING_MODE_PITCH; 753 754 if (args->value != 0) 755 return -EINVAL; 756 757 break; 758 759 case DRM_TEGRA_GEM_TILING_MODE_TILED: 760 mode = TEGRA_BO_TILING_MODE_TILED; 761 762 if (args->value != 0) 763 return -EINVAL; 764 765 break; 766 767 case DRM_TEGRA_GEM_TILING_MODE_BLOCK: 768 mode = TEGRA_BO_TILING_MODE_BLOCK; 769 770 if (args->value > 5) 771 return -EINVAL; 772 773 value = args->value; 774 break; 775 776 default: 777 return -EINVAL; 778 } 779 780 gem = drm_gem_object_lookup(file, args->handle); 781 if (!gem) 782 return -ENOENT; 783 784 bo = to_tegra_bo(gem); 785 786 bo->tiling.mode = mode; 787 bo->tiling.value = value; 788 789 drm_gem_object_put_unlocked(gem); 790 791 return 0; 792 } 793 794 static int tegra_gem_get_tiling(struct drm_device *drm, void *data, 795 struct drm_file *file) 796 { 797 struct drm_tegra_gem_get_tiling *args = data; 798 struct drm_gem_object *gem; 799 struct tegra_bo *bo; 800 int err = 0; 801 802 gem = drm_gem_object_lookup(file, args->handle); 803 if (!gem) 804 return -ENOENT; 805 806 bo = to_tegra_bo(gem); 807 808 switch (bo->tiling.mode) { 809 case TEGRA_BO_TILING_MODE_PITCH: 810 args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH; 811 args->value = 0; 812 break; 813 814 case TEGRA_BO_TILING_MODE_TILED: 815 args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED; 816 args->value = 0; 817 break; 818 819 case TEGRA_BO_TILING_MODE_BLOCK: 820 args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK; 821 args->value = bo->tiling.value; 822 break; 823 824 default: 825 err = -EINVAL; 826 break; 827 } 828 829 drm_gem_object_put_unlocked(gem); 830 831 return err; 832 } 833 834 static int tegra_gem_set_flags(struct drm_device *drm, void *data, 835 struct drm_file *file) 836 { 837 struct drm_tegra_gem_set_flags *args = data; 838 struct drm_gem_object *gem; 839 struct tegra_bo *bo; 840 841 if (args->flags & ~DRM_TEGRA_GEM_FLAGS) 842 return -EINVAL; 843 844 gem = drm_gem_object_lookup(file, args->handle); 845 if (!gem) 846 return -ENOENT; 847 848 bo = to_tegra_bo(gem); 849 bo->flags = 0; 850 851 if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP) 852 bo->flags |= TEGRA_BO_BOTTOM_UP; 853 854 drm_gem_object_put_unlocked(gem); 855 856 return 0; 857 } 858 859 static int tegra_gem_get_flags(struct drm_device *drm, void *data, 860 struct drm_file *file) 861 { 862 struct drm_tegra_gem_get_flags *args = data; 863 struct drm_gem_object *gem; 864 struct tegra_bo *bo; 865 866 gem = drm_gem_object_lookup(file, args->handle); 867 if (!gem) 868 return -ENOENT; 869 870 bo = to_tegra_bo(gem); 871 args->flags = 0; 872 873 if (bo->flags & TEGRA_BO_BOTTOM_UP) 874 args->flags |= DRM_TEGRA_GEM_BOTTOM_UP; 875 876 drm_gem_object_put_unlocked(gem); 877 878 return 0; 879 } 880 #endif 881 882 static const struct drm_ioctl_desc tegra_drm_ioctls[] = { 883 #ifdef CONFIG_DRM_TEGRA_STAGING 884 DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 885 DRM_UNLOCKED | DRM_RENDER_ALLOW), 886 DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 887 DRM_UNLOCKED | DRM_RENDER_ALLOW), 888 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 889 DRM_UNLOCKED | DRM_RENDER_ALLOW), 890 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, 891 DRM_UNLOCKED | DRM_RENDER_ALLOW), 892 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, 893 DRM_UNLOCKED | DRM_RENDER_ALLOW), 894 DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, 895 DRM_UNLOCKED | DRM_RENDER_ALLOW), 896 DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, 897 DRM_UNLOCKED | DRM_RENDER_ALLOW), 898 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, 899 DRM_UNLOCKED | DRM_RENDER_ALLOW), 900 DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, 901 DRM_UNLOCKED | DRM_RENDER_ALLOW), 902 DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, 903 DRM_UNLOCKED | DRM_RENDER_ALLOW), 904 DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, 905 DRM_UNLOCKED | DRM_RENDER_ALLOW), 906 DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, 907 DRM_UNLOCKED | DRM_RENDER_ALLOW), 908 DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, 909 DRM_UNLOCKED | DRM_RENDER_ALLOW), 910 DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, 911 DRM_UNLOCKED | DRM_RENDER_ALLOW), 912 #endif 913 }; 914 915 static const struct file_operations tegra_drm_fops = { 916 .owner = THIS_MODULE, 917 .open = drm_open, 918 .release = drm_release, 919 .unlocked_ioctl = drm_ioctl, 920 .mmap = tegra_drm_mmap, 921 .poll = drm_poll, 922 .read = drm_read, 923 .compat_ioctl = drm_compat_ioctl, 924 .llseek = noop_llseek, 925 }; 926 927 static int tegra_drm_context_cleanup(int id, void *p, void *data) 928 { 929 struct tegra_drm_context *context = p; 930 931 tegra_drm_context_free(context); 932 933 return 0; 934 } 935 936 static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file) 937 { 938 struct tegra_drm_file *fpriv = file->driver_priv; 939 940 mutex_lock(&fpriv->lock); 941 idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL); 942 mutex_unlock(&fpriv->lock); 943 944 idr_destroy(&fpriv->contexts); 945 mutex_destroy(&fpriv->lock); 946 kfree(fpriv); 947 } 948 949 #ifdef CONFIG_DEBUG_FS 950 static int tegra_debugfs_framebuffers(struct seq_file *s, void *data) 951 { 952 struct drm_info_node *node = (struct drm_info_node *)s->private; 953 struct drm_device *drm = node->minor->dev; 954 struct drm_framebuffer *fb; 955 956 mutex_lock(&drm->mode_config.fb_lock); 957 958 list_for_each_entry(fb, &drm->mode_config.fb_list, head) { 959 seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n", 960 fb->base.id, fb->width, fb->height, 961 fb->format->depth, 962 fb->format->cpp[0] * 8, 963 drm_framebuffer_read_refcount(fb)); 964 } 965 966 mutex_unlock(&drm->mode_config.fb_lock); 967 968 return 0; 969 } 970 971 static int tegra_debugfs_iova(struct seq_file *s, void *data) 972 { 973 struct drm_info_node *node = (struct drm_info_node *)s->private; 974 struct drm_device *drm = node->minor->dev; 975 struct tegra_drm *tegra = drm->dev_private; 976 struct drm_printer p = drm_seq_file_printer(s); 977 978 if (tegra->domain) { 979 mutex_lock(&tegra->mm_lock); 980 drm_mm_print(&tegra->mm, &p); 981 mutex_unlock(&tegra->mm_lock); 982 } 983 984 return 0; 985 } 986 987 static struct drm_info_list tegra_debugfs_list[] = { 988 { "framebuffers", tegra_debugfs_framebuffers, 0 }, 989 { "iova", tegra_debugfs_iova, 0 }, 990 }; 991 992 static int tegra_debugfs_init(struct drm_minor *minor) 993 { 994 return drm_debugfs_create_files(tegra_debugfs_list, 995 ARRAY_SIZE(tegra_debugfs_list), 996 minor->debugfs_root, minor); 997 } 998 #endif 999 1000 static struct drm_driver tegra_drm_driver = { 1001 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | 1002 DRIVER_ATOMIC | DRIVER_RENDER, 1003 .load = tegra_drm_load, 1004 .unload = tegra_drm_unload, 1005 .open = tegra_drm_open, 1006 .postclose = tegra_drm_postclose, 1007 .lastclose = drm_fb_helper_lastclose, 1008 1009 #if defined(CONFIG_DEBUG_FS) 1010 .debugfs_init = tegra_debugfs_init, 1011 #endif 1012 1013 .gem_free_object_unlocked = tegra_bo_free_object, 1014 .gem_vm_ops = &tegra_bo_vm_ops, 1015 1016 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 1017 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 1018 .gem_prime_export = tegra_gem_prime_export, 1019 .gem_prime_import = tegra_gem_prime_import, 1020 1021 .dumb_create = tegra_bo_dumb_create, 1022 1023 .ioctls = tegra_drm_ioctls, 1024 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), 1025 .fops = &tegra_drm_fops, 1026 1027 .name = DRIVER_NAME, 1028 .desc = DRIVER_DESC, 1029 .date = DRIVER_DATE, 1030 .major = DRIVER_MAJOR, 1031 .minor = DRIVER_MINOR, 1032 .patchlevel = DRIVER_PATCHLEVEL, 1033 }; 1034 1035 int tegra_drm_register_client(struct tegra_drm *tegra, 1036 struct tegra_drm_client *client) 1037 { 1038 mutex_lock(&tegra->clients_lock); 1039 list_add_tail(&client->list, &tegra->clients); 1040 mutex_unlock(&tegra->clients_lock); 1041 1042 return 0; 1043 } 1044 1045 int tegra_drm_unregister_client(struct tegra_drm *tegra, 1046 struct tegra_drm_client *client) 1047 { 1048 mutex_lock(&tegra->clients_lock); 1049 list_del_init(&client->list); 1050 mutex_unlock(&tegra->clients_lock); 1051 1052 return 0; 1053 } 1054 1055 struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client, 1056 bool shared) 1057 { 1058 struct drm_device *drm = dev_get_drvdata(client->parent); 1059 struct tegra_drm *tegra = drm->dev_private; 1060 struct iommu_group *group = NULL; 1061 int err; 1062 1063 if (tegra->domain) { 1064 group = iommu_group_get(client->dev); 1065 if (!group) { 1066 dev_err(client->dev, "failed to get IOMMU group\n"); 1067 return ERR_PTR(-ENODEV); 1068 } 1069 1070 if (!shared || (shared && (group != tegra->group))) { 1071 err = iommu_attach_group(tegra->domain, group); 1072 if (err < 0) { 1073 iommu_group_put(group); 1074 return ERR_PTR(err); 1075 } 1076 1077 if (shared && !tegra->group) 1078 tegra->group = group; 1079 } 1080 } 1081 1082 return group; 1083 } 1084 1085 void host1x_client_iommu_detach(struct host1x_client *client, 1086 struct iommu_group *group) 1087 { 1088 struct drm_device *drm = dev_get_drvdata(client->parent); 1089 struct tegra_drm *tegra = drm->dev_private; 1090 1091 if (group) { 1092 if (group == tegra->group) { 1093 iommu_detach_group(tegra->domain, group); 1094 tegra->group = NULL; 1095 } 1096 1097 iommu_group_put(group); 1098 } 1099 } 1100 1101 void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma) 1102 { 1103 struct iova *alloc; 1104 void *virt; 1105 gfp_t gfp; 1106 int err; 1107 1108 if (tegra->domain) 1109 size = iova_align(&tegra->carveout.domain, size); 1110 else 1111 size = PAGE_ALIGN(size); 1112 1113 gfp = GFP_KERNEL | __GFP_ZERO; 1114 if (!tegra->domain) { 1115 /* 1116 * Many units only support 32-bit addresses, even on 64-bit 1117 * SoCs. If there is no IOMMU to translate into a 32-bit IO 1118 * virtual address space, force allocations to be in the 1119 * lower 32-bit range. 1120 */ 1121 gfp |= GFP_DMA; 1122 } 1123 1124 virt = (void *)__get_free_pages(gfp, get_order(size)); 1125 if (!virt) 1126 return ERR_PTR(-ENOMEM); 1127 1128 if (!tegra->domain) { 1129 /* 1130 * If IOMMU is disabled, devices address physical memory 1131 * directly. 1132 */ 1133 *dma = virt_to_phys(virt); 1134 return virt; 1135 } 1136 1137 alloc = alloc_iova(&tegra->carveout.domain, 1138 size >> tegra->carveout.shift, 1139 tegra->carveout.limit, true); 1140 if (!alloc) { 1141 err = -EBUSY; 1142 goto free_pages; 1143 } 1144 1145 *dma = iova_dma_addr(&tegra->carveout.domain, alloc); 1146 err = iommu_map(tegra->domain, *dma, virt_to_phys(virt), 1147 size, IOMMU_READ | IOMMU_WRITE); 1148 if (err < 0) 1149 goto free_iova; 1150 1151 return virt; 1152 1153 free_iova: 1154 __free_iova(&tegra->carveout.domain, alloc); 1155 free_pages: 1156 free_pages((unsigned long)virt, get_order(size)); 1157 1158 return ERR_PTR(err); 1159 } 1160 1161 void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt, 1162 dma_addr_t dma) 1163 { 1164 if (tegra->domain) 1165 size = iova_align(&tegra->carveout.domain, size); 1166 else 1167 size = PAGE_ALIGN(size); 1168 1169 if (tegra->domain) { 1170 iommu_unmap(tegra->domain, dma, size); 1171 free_iova(&tegra->carveout.domain, 1172 iova_pfn(&tegra->carveout.domain, dma)); 1173 } 1174 1175 free_pages((unsigned long)virt, get_order(size)); 1176 } 1177 1178 static int host1x_drm_probe(struct host1x_device *dev) 1179 { 1180 struct drm_driver *driver = &tegra_drm_driver; 1181 struct drm_device *drm; 1182 int err; 1183 1184 drm = drm_dev_alloc(driver, &dev->dev); 1185 if (IS_ERR(drm)) 1186 return PTR_ERR(drm); 1187 1188 dev_set_drvdata(&dev->dev, drm); 1189 1190 err = drm_dev_register(drm, 0); 1191 if (err < 0) 1192 goto unref; 1193 1194 return 0; 1195 1196 unref: 1197 drm_dev_unref(drm); 1198 return err; 1199 } 1200 1201 static int host1x_drm_remove(struct host1x_device *dev) 1202 { 1203 struct drm_device *drm = dev_get_drvdata(&dev->dev); 1204 1205 drm_dev_unregister(drm); 1206 drm_dev_unref(drm); 1207 1208 return 0; 1209 } 1210 1211 #ifdef CONFIG_PM_SLEEP 1212 static int host1x_drm_suspend(struct device *dev) 1213 { 1214 struct drm_device *drm = dev_get_drvdata(dev); 1215 struct tegra_drm *tegra = drm->dev_private; 1216 1217 drm_kms_helper_poll_disable(drm); 1218 tegra_drm_fb_suspend(drm); 1219 1220 tegra->state = drm_atomic_helper_suspend(drm); 1221 if (IS_ERR(tegra->state)) { 1222 tegra_drm_fb_resume(drm); 1223 drm_kms_helper_poll_enable(drm); 1224 return PTR_ERR(tegra->state); 1225 } 1226 1227 return 0; 1228 } 1229 1230 static int host1x_drm_resume(struct device *dev) 1231 { 1232 struct drm_device *drm = dev_get_drvdata(dev); 1233 struct tegra_drm *tegra = drm->dev_private; 1234 1235 drm_atomic_helper_resume(drm, tegra->state); 1236 tegra_drm_fb_resume(drm); 1237 drm_kms_helper_poll_enable(drm); 1238 1239 return 0; 1240 } 1241 #endif 1242 1243 static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend, 1244 host1x_drm_resume); 1245 1246 static const struct of_device_id host1x_drm_subdevs[] = { 1247 { .compatible = "nvidia,tegra20-dc", }, 1248 { .compatible = "nvidia,tegra20-hdmi", }, 1249 { .compatible = "nvidia,tegra20-gr2d", }, 1250 { .compatible = "nvidia,tegra20-gr3d", }, 1251 { .compatible = "nvidia,tegra30-dc", }, 1252 { .compatible = "nvidia,tegra30-hdmi", }, 1253 { .compatible = "nvidia,tegra30-gr2d", }, 1254 { .compatible = "nvidia,tegra30-gr3d", }, 1255 { .compatible = "nvidia,tegra114-dsi", }, 1256 { .compatible = "nvidia,tegra114-hdmi", }, 1257 { .compatible = "nvidia,tegra114-gr3d", }, 1258 { .compatible = "nvidia,tegra124-dc", }, 1259 { .compatible = "nvidia,tegra124-sor", }, 1260 { .compatible = "nvidia,tegra124-hdmi", }, 1261 { .compatible = "nvidia,tegra124-dsi", }, 1262 { .compatible = "nvidia,tegra124-vic", }, 1263 { .compatible = "nvidia,tegra132-dsi", }, 1264 { .compatible = "nvidia,tegra210-dc", }, 1265 { .compatible = "nvidia,tegra210-dsi", }, 1266 { .compatible = "nvidia,tegra210-sor", }, 1267 { .compatible = "nvidia,tegra210-sor1", }, 1268 { .compatible = "nvidia,tegra210-vic", }, 1269 { .compatible = "nvidia,tegra186-display", }, 1270 { .compatible = "nvidia,tegra186-dc", }, 1271 { .compatible = "nvidia,tegra186-sor", }, 1272 { .compatible = "nvidia,tegra186-sor1", }, 1273 { .compatible = "nvidia,tegra186-vic", }, 1274 { /* sentinel */ } 1275 }; 1276 1277 static struct host1x_driver host1x_drm_driver = { 1278 .driver = { 1279 .name = "drm", 1280 .pm = &host1x_drm_pm_ops, 1281 }, 1282 .probe = host1x_drm_probe, 1283 .remove = host1x_drm_remove, 1284 .subdevs = host1x_drm_subdevs, 1285 }; 1286 1287 static struct platform_driver * const drivers[] = { 1288 &tegra_display_hub_driver, 1289 &tegra_dc_driver, 1290 &tegra_hdmi_driver, 1291 &tegra_dsi_driver, 1292 &tegra_dpaux_driver, 1293 &tegra_sor_driver, 1294 &tegra_gr2d_driver, 1295 &tegra_gr3d_driver, 1296 &tegra_vic_driver, 1297 }; 1298 1299 static int __init host1x_drm_init(void) 1300 { 1301 int err; 1302 1303 err = host1x_driver_register(&host1x_drm_driver); 1304 if (err < 0) 1305 return err; 1306 1307 err = platform_register_drivers(drivers, ARRAY_SIZE(drivers)); 1308 if (err < 0) 1309 goto unregister_host1x; 1310 1311 return 0; 1312 1313 unregister_host1x: 1314 host1x_driver_unregister(&host1x_drm_driver); 1315 return err; 1316 } 1317 module_init(host1x_drm_init); 1318 1319 static void __exit host1x_drm_exit(void) 1320 { 1321 platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); 1322 host1x_driver_unregister(&host1x_drm_driver); 1323 } 1324 module_exit(host1x_drm_exit); 1325 1326 MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); 1327 MODULE_DESCRIPTION("NVIDIA Tegra DRM driver"); 1328 MODULE_LICENSE("GPL v2"); 1329