/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma);
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
			obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* Get an iova but don't pin the memory behind it */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}


/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu%9s\n", obj->size, madv);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s]", vma->aspace->name,
				vma->iova, vma->mapped ? "mapped" : "unmapped");

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);

}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_put_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}
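
/*
 * Illustrative usage sketch (not part of this file's API, kept as a comment):
 * a typical in-kernel caller allocates a kernel-mapped, pinned BO with
 * msm_gem_kernel_new() and releases it with msm_gem_kernel_put().  The
 * address space (assumed here to be gpu->aspace) and the SZ_4K size are
 * placeholders; most error handling is elided.
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr;
 *
 *	vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, gpu->aspace,
 *			&bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	// ... fill *vaddr, hand iova to the GPU ...
 *
 *	msm_gem_kernel_put(bo, gpu->aspace, false);
 */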