/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
						msm_obj->sgt->nents,
						DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_purge_vma(vma->aspace, vma);
		msm_gem_close_vma(vma->aspace, vma);
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}
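
/*
 * Typical usage (illustrative sketch, not a verbatim caller from the tree;
 * error handling abbreviated):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	...program 'iova' into the GPU or display hardware...
 *	msm_gem_unpin_iova(obj, aspace);
 */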

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
				msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
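
/*
 * Typical CPU-access pattern (illustrative sketch, not a verbatim caller;
 * 'data' and 'len' are placeholders):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */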

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
				reservation_object_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(obj->resv, fence);
	else
		reservation_object_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
					vma->aspace != NULL ? vma->aspace->name : NULL,
					vma->iova, vma->mapped ? "mapped" : "unmapped",
					vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
				freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv)
		msm_obj->base.resv = resv;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);

}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}
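
/*
 * Typical kernel-internal allocation (illustrative sketch, not a verbatim
 * caller; 'size' and 'aspace' are placeholders):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...fill *ptr, hand 'iova' to the GPU...
 *	msm_gem_kernel_put(bo, aspace, false);
 */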

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}