/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
        return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        drm_gem_object_unreference_unlocked(&obj->gem);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
                            void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
        struct tegra_bo *obj = host1x_to_tegra_bo(bo);

        drm_gem_object_reference(&obj->gem);

        return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
        .get = tegra_bo_get,
        .put = tegra_bo_put,
        .pin = tegra_bo_pin,
        .unpin = tegra_bo_unpin,
        .mmap = tegra_bo_mmap,
        .munmap = tegra_bo_munmap,
        .kmap = tegra_bo_kmap,
        .kunmap = tegra_bo_kunmap,
};
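
/*
 * The ops above are consumed through the host1x_bo_*() wrappers from
 * <linux/host1x.h>. A minimal sketch of a consumer (hypothetical, not
 * part of this driver):
 *
 *      struct host1x_bo *ref = host1x_bo_get(&obj->base);
 *      dma_addr_t addr = host1x_bo_pin(ref, NULL);
 *
 *      (program addr into the engine and submit the job, then:)
 *
 *      host1x_bo_unpin(ref, NULL);
 *      host1x_bo_put(ref);
 *
 * tegra_bo_pin() can return the address directly because the buffer is
 * already contiguous in IOVA (or physical) space by the time it is pinned.
 */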
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
        int prot = IOMMU_READ | IOMMU_WRITE;
        ssize_t err;

        if (bo->mm)
                return -EBUSY;

        bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
        if (!bo->mm)
                return -ENOMEM;

        err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
                                         PAGE_SIZE, 0, 0, 0);
        if (err < 0) {
                dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
                        err);
                goto free;
        }

        bo->paddr = bo->mm->start;

        /* iommu_map_sg() returns the number of bytes mapped, or 0 on failure */
        bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
                                bo->sgt->nents, prot);
        if (!bo->size) {
                dev_err(tegra->drm->dev, "failed to map buffer\n");
                err = -ENOMEM;
                goto remove;
        }

        return 0;

remove:
        drm_mm_remove_node(bo->mm);
free:
        kfree(bo->mm);
        return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
        if (!bo->mm)
                return 0;

        iommu_unmap(tegra->domain, bo->paddr, bo->size);
        drm_mm_remove_node(bo->mm);
        kfree(bo->mm);

        return 0;
}

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
                                              size_t size)
{
        struct tegra_bo *bo;
        int err;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        host1x_bo_init(&bo->base, &tegra_bo_ops);
        size = round_up(size, PAGE_SIZE);

        err = drm_gem_object_init(drm, &bo->gem, size);
        if (err < 0)
                goto free;

        err = drm_gem_create_mmap_offset(&bo->gem);
        if (err < 0)
                goto release;

        return bo;

release:
        drm_gem_object_release(&bo->gem);
free:
        kfree(bo);
        return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
        if (bo->pages) {
                drm_gem_put_pages(&bo->gem, bo->pages, true, true);
                sg_free_table(bo->sgt);
                kfree(bo->sgt);
        } else if (bo->vaddr) {
                dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
                                      bo->paddr);
        }
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
        struct scatterlist *s;
        unsigned int i;

        bo->pages = drm_gem_get_pages(&bo->gem);
        if (IS_ERR(bo->pages))
                return PTR_ERR(bo->pages);

        bo->num_pages = bo->gem.size >> PAGE_SHIFT;

        bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
        if (IS_ERR(bo->sgt))
                goto put_pages;

        /*
         * Fake up the SG table so that dma_sync_sg_for_device() can be used
         * to flush the pages associated with it.
         *
         * TODO: Replace this by drm_clflush_sg() once it can be implemented
         * without relying on symbols that are not exported.
         */
        for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
                sg_dma_address(s) = sg_phys(s);

        dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
                               DMA_TO_DEVICE);

        return 0;

put_pages:
        drm_gem_put_pages(&bo->gem, bo->pages, false, false);
        return PTR_ERR(bo->sgt);
}

static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        if (tegra->domain) {
                err = tegra_bo_get_pages(drm, bo);
                if (err < 0)
                        return err;

                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0) {
                        tegra_bo_free(drm, bo);
                        return err;
                }
        } else {
                size_t size = bo->gem.size;

                bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
                                                   GFP_KERNEL | __GFP_NOWARN);
                if (!bo->vaddr) {
                        dev_err(drm->dev,
                                "failed to allocate buffer of size %zu\n",
                                size);
                        return -ENOMEM;
                }
        }

        return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
                                 unsigned long flags)
{
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, size);
        if (IS_ERR(bo))
                return bo;

        err = tegra_bo_alloc(drm, bo);
        if (err < 0)
                goto release;

        if (flags & DRM_TEGRA_GEM_CREATE_TILED)
                bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

        if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        return bo;

release:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
                                             struct drm_device *drm,
                                             size_t size,
                                             unsigned long flags,
                                             u32 *handle)
{
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_create(drm, size, flags);
        if (IS_ERR(bo))
                return bo;

        err = drm_gem_handle_create(file, &bo->gem, handle);
        if (err) {
                tegra_bo_free_object(&bo->gem);
                return ERR_PTR(err);
        }

        drm_gem_object_unreference_unlocked(&bo->gem);

        return bo;
}
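
/*
 * Note the reference counting convention in tegra_bo_create_with_handle():
 * the handle installed for @file owns the only long-term reference, so the
 * allocation reference is dropped before returning and the returned pointer
 * must not be used once userspace may have closed the handle. A caller
 * sketch (hypothetical; the DRM_TEGRA_GEM_CREATE ioctl follows this
 * pattern):
 *
 *      u32 handle;
 *      struct tegra_bo *bo;
 *
 *      bo = tegra_bo_create_with_handle(file, drm, size,
 *                                       DRM_TEGRA_GEM_CREATE_BOTTOM_UP,
 *                                       &handle);
 *      if (IS_ERR(bo))
 *              return PTR_ERR(bo);
 */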
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
                                        struct dma_buf *buf)
{
        struct tegra_drm *tegra = drm->dev_private;
        struct dma_buf_attachment *attach;
        struct tegra_bo *bo;
        int err;

        bo = tegra_bo_alloc_object(drm, buf->size);
        if (IS_ERR(bo))
                return bo;

        attach = dma_buf_attach(buf, drm->dev);
        if (IS_ERR(attach)) {
                err = PTR_ERR(attach);
                goto free;
        }

        get_dma_buf(buf);

        bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
        if (!bo->sgt) {
                err = -ENOMEM;
                goto detach;
        }

        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto detach;
        }

        if (tegra->domain) {
                err = tegra_bo_iommu_map(tegra, bo);
                if (err < 0)
                        goto detach;
        } else {
                if (bo->sgt->nents > 1) {
                        err = -EINVAL;
                        goto detach;
                }

                bo->paddr = sg_dma_address(bo->sgt->sgl);
        }

        bo->gem.import_attach = attach;

        return bo;

detach:
        if (!IS_ERR_OR_NULL(bo->sgt))
                dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

        dma_buf_detach(buf, attach);
        dma_buf_put(buf);
free:
        drm_gem_object_release(&bo->gem);
        kfree(bo);
        return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
        struct tegra_drm *tegra = gem->dev->dev_private;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (tegra->domain)
                tegra_bo_iommu_unmap(tegra, bo);

        if (gem->import_attach) {
                dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
                                         DMA_TO_DEVICE);
                drm_prime_gem_destroy(gem, NULL);
        } else {
                tegra_bo_free(gem->dev, bo);
        }

        drm_gem_object_release(gem);
        kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
                         struct drm_mode_create_dumb *args)
{
        unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_bo *bo;

        args->pitch = round_up(min_pitch, tegra->pitch_align);
        args->size = args->pitch * args->height;

        bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
                             u32 handle, u64 *offset)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(drm, file, handle);
        if (!gem) {
                dev_err(drm->dev, "failed to look up GEM object\n");
                return -EINVAL;
        }

        bo = to_tegra_bo(gem);

        *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

        drm_gem_object_unreference_unlocked(gem);

        return 0;
}

static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *gem = vma->vm_private_data;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct page *page;
        pgoff_t offset;
        int err;

        if (!bo->pages)
                return VM_FAULT_SIGBUS;

        offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                 PAGE_SHIFT;
        page = bo->pages[offset];

        err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
        switch (err) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                return VM_FAULT_NOPAGE;

        case -ENOMEM:
                return VM_FAULT_OOM;
        }

        return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
        .fault = tegra_bo_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
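
/*
 * mmap() of a tegra_bo takes one of two paths: IOMMU-backed objects consist
 * of individual pages and are mapped lazily via tegra_bo_fault() above,
 * while contiguous (CMA) objects have no struct pages to insert and are
 * mapped up front with dma_mmap_writecombine(). In the contiguous case,
 * vm_pgoff is temporarily cleared because drm_gem_mmap() leaves the fake
 * mmap offset in it, which dma_mmap_writecombine() would misinterpret as
 * an offset into the buffer.
 */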
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;
        int ret;

        ret = drm_gem_mmap(file, vma);
        if (ret)
                return ret;

        gem = vma->vm_private_data;
        bo = to_tegra_bo(gem);

        if (!bo->pages) {
                unsigned long vm_pgoff = vma->vm_pgoff;

                vma->vm_flags &= ~VM_PFNMAP;
                vma->vm_pgoff = 0;

                ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
                                            bo->paddr, gem->size);
                if (ret) {
                        drm_gem_vm_close(vma);
                        return ret;
                }

                vma->vm_pgoff = vm_pgoff;
        } else {
                pgprot_t prot = vm_get_page_prot(vma->vm_flags);

                vma->vm_flags |= VM_MIXEDMAP;
                vma->vm_flags &= ~VM_PFNMAP;

                vma->vm_page_prot = pgprot_writecombine(prot);
        }

        return 0;
}

static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
                            enum dma_data_direction dir)
{
        struct drm_gem_object *gem = attach->dmabuf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        if (bo->pages) {
                struct scatterlist *sg;
                unsigned int i;

                if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
                        goto free;

                for_each_sg(sgt->sgl, sg, bo->num_pages, i)
                        sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

                if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
                        goto free;
        } else {
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free;

                sg_dma_address(sgt->sgl) = bo->paddr;
                sg_dma_len(sgt->sgl) = gem->size;
        }

        return sgt;

free:
        sg_free_table(sgt);
        kfree(sgt);
        return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
                                          struct sg_table *sgt,
                                          enum dma_data_direction dir)
{
        struct drm_gem_object *gem = attach->dmabuf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);

        if (bo->pages)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

        sg_free_table(sgt);
        kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
        drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
                                         unsigned long page)
{
        return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
                                          unsigned long page,
                                          void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
        return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
                                   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
        return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
        struct drm_gem_object *gem = buf->priv;
        struct tegra_bo *bo = to_tegra_bo(gem);

        return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
        .map_dma_buf = tegra_gem_prime_map_dma_buf,
        .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
        .release = tegra_gem_prime_release,
        .kmap_atomic = tegra_gem_prime_kmap_atomic,
        .kunmap_atomic = tegra_gem_prime_kunmap_atomic,
        .kmap = tegra_gem_prime_kmap,
        .kunmap = tegra_gem_prime_kunmap,
        .mmap = tegra_gem_prime_mmap,
        .vmap = tegra_gem_prime_vmap,
        .vunmap = tegra_gem_prime_vunmap,
};
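
/*
 * Exporting goes through the generic PRIME code, which invokes the
 * ->gem_prime_export() hook below; importers then drive the dma_buf_ops
 * above. A sketch of what another kernel driver would do with the
 * exported buffer (hypothetical, error handling omitted):
 *
 *      struct dma_buf_attachment *attach = dma_buf_attach(buf, dev);
 *      struct sg_table *sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *
 *      (hand sgt to the device; on teardown:)
 *
 *      dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
 *      dma_buf_detach(buf, attach);
 */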
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
                                       struct drm_gem_object *gem,
                                       int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &tegra_gem_prime_dmabuf_ops;
        exp_info.size = gem->size;
        exp_info.flags = flags;
        exp_info.priv = gem;

        return dma_buf_export(&exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
                                              struct dma_buf *buf)
{
        struct tegra_bo *bo;

        if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
                struct drm_gem_object *gem = buf->priv;

                if (gem->dev == drm) {
                        drm_gem_object_reference(gem);
                        return gem;
                }
        }

        bo = tegra_bo_import(drm, buf);
        if (IS_ERR(bo))
                return ERR_CAST(bo);

        return &bo->gem;
}
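
/*
 * A full PRIME round trip on the same device is short-circuited by
 * tegra_gem_prime_import() above: it recognizes its own dma_buf_ops and
 * returns a reference to the existing GEM object instead of wrapping the
 * buffer in an import attachment. Userspace sketch (hypothetical, error
 * handling omitted):
 *
 *      struct drm_prime_handle args = { .handle = handle };
 *
 *      ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *      ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *
 * args.handle now names the same tegra_bo as the original handle.
 */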