// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!msm_gem_is_locked(obj));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = get_vaddr(obj, __MSM_MADV_PURGED);
	msm_gem_unlock(obj);

	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	msm_gem_unlock(obj);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	msm_gem_unlock(obj);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

	if (!atomic_fetch_inc(&msm_obj->active_count)) {
		msm_obj->gpu = gpu;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!atomic_dec_return(&msm_obj->active_count)) {
		msm_obj->gpu = NULL;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	}
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly! Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	msm_gem_lock(obj);

	put_iova(obj);

	if (obj->import_attach) {
		WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	msm_gem_unlock(obj);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&dev->struct_mutex);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&dev->struct_mutex);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);

}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}