// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
						msm_obj->sgt->nents,
						DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
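
/*
 * Typical userspace path into the two functions above (an illustrative
 * sketch, not an exhaustive list): a buffer is created (e.g. through the
 * dumb-buffer ioctl handled by msm_gem_dumb_create() below), its fake mmap
 * offset is queried via msm_gem_dumb_map_offset(), and the DRM fd is then
 * mmap()'d at that offset.  The resulting VMA is configured here and
 * populated lazily, one page at a time, by msm_gem_fault() below.
 */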

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}
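
/*
 * Illustrative pairing for the helpers above (a sketch, not lifted from a
 * real caller): code that needs a device address for the duration of some
 * work would typically do something like:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... program iova into the GPU / display hardware ...
 *	msm_gem_unpin_iova(obj, aspace);
 */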

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
				msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
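
/*
 * Illustrative pairing for the vaddr helpers above (a sketch): kernel code
 * that needs CPU access brackets it with get/put, e.g.:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, src, len);	(src/len are hypothetical)
 *	msm_gem_put_vaddr(obj);
 *
 * Note that put only drops vmap_count; the mapping itself persists until
 * msm_gem_vunmap() below tears it down.
 */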

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}
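
/*
 * Note on the purge path (a descriptive summary of the helpers above): once
 * userspace marks a buffer MSM_MADV_DONTNEED via msm_gem_madvise(), the
 * shrinker may invoke msm_gem_purge() under memory pressure, dropping the
 * iovas, the kernel mapping and the backing pages and leaving the object in
 * the __MSM_MADV_PURGED state.  Subsequent use is then refused by the
 * WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED) checks earlier in this file.
 */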

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
				reservation_object_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(obj->resv, fence);
	else
		reservation_object_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
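
/*
 * Illustrative use of the CPU access helpers above (a sketch of what an
 * ioctl-level caller might do; the MSM_PREP_* flags come from the msm uapi
 * header):
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads/writes the buffer ...
 *	msm_gem_cpu_fini(obj);
 */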

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
					vma->aspace != NULL ? vma->aspace->name : NULL,
					vma->iova, vma->mapped ? "mapped" : "unmapped",
					vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
				freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv)
		msm_obj->base.resv = resv;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}
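
/*
 * Note (descriptive): the *_locked/unlocked split used below follows the
 * convention in this file: the _locked variants expect the caller to already
 * hold dev->struct_mutex, while the plain variants take it themselves when
 * putting the new object on the inactive list.
 */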

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}
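
/*
 * Illustrative pairing for the kernel_new helpers above (a sketch): internal
 * users that need a kernel-mapped, GPU-visible buffer (e.g. ringbuffers)
 * typically do:
 *
 *	uint64_t iova;
 *	struct drm_gem_object *bo;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	msm_gem_kernel_put(bo, aspace, false);
 */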

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}