/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_unreference_unlocked(&obj->gem);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	*sgt = obj->sgt;

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr + page * PAGE_SIZE;
	else if (obj->gem.import_attach)
		return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
	else
		return vmap(obj->pages + page, 1, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_reference(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

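/*
 * tegra_bo_iommu_map() reserves a range of I/O virtual addresses from the
 * drm_mm allocator and maps the buffer's scatterlist into the IOMMU domain
 * at that address; iommu_map_sg() returns the number of bytes it mapped,
 * which is recorded in bo->size for the matching iommu_unmap(). Sketch of
 * the sequence used on the IOMMU path (see tegra_bo_alloc() below):
 *
 *	err = tegra_bo_get_pages(drm, bo);   // shmem pages + SG table
 *	err = tegra_bo_iommu_map(tegra, bo); // bo->paddr now holds an IOVA
 */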
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
	}
}

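/*
 * For IOMMU-backed allocations the buffer is not physically contiguous:
 * tegra_bo_get_pages() pins shmem pages via drm_gem_get_pages(), builds an
 * SG table for them, and flushes the pages out of the CPU caches with
 * dma_sync_sg_for_device() so the device observes the correct contents.
 */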
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(bo->sgt);
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;
}

static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

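/*
 * tegra_bo_free_object() is the driver's GEM free callback. Imported
 * buffers are handed back to their exporter through drm_prime_gem_destroy(),
 * while natively allocated buffers release their backing storage in
 * tegra_bo_free(); any IOMMU mapping is torn down first in either case.
 */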
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, vmf->address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
				  gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

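/*
 * dma-buf (PRIME) export support. tegra_gem_prime_map_dma_buf() describes
 * the buffer to an importer: shmem-backed objects get one SG entry per
 * page, mapped through the importer's DMA domain, whereas contiguous
 * (CMA-backed) objects are exported as a single SG entry at bo->paddr.
 */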
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(drm, &exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}