// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!msm_gem_is_locked(obj));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

	if (!atomic_fetch_inc(&msm_obj->active_count)) {
		mutex_lock(&priv->mm_lock);
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();

	if (!atomic_dec_return(&msm_obj->active_count)) {
		mutex_lock(&priv->mm_lock);
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&priv->mm_lock);
	}
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	mutex_lock(&priv->mm_lock);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	put_iova(obj);

	if (obj->import_attach) {
		WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&priv->mm_lock);

	return obj;

fail:
	if (struct_mutex_locked) {
		drm_gem_object_put_locked(obj);
	} else {
		drm_gem_object_put(obj);
	}
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&priv->mm_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);

}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}