// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_lru(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages
		 * are clean because display controller, GPU, etc.
		 * are not coherent:
		 */
		if (msm_obj->flags & MSM_BO_WC)
			sync_for_device(msm_obj);

		update_lru(obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & MSM_BO_WC)
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
		update_lru(obj);
	}
}

static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	if (!IS_ERR(p)) {
		to_msm_bo(obj)->pin_count++;
		update_lru(obj);
	}

	return p;
}

struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
	struct page **p;

	msm_gem_lock(obj);
	p = msm_gem_pin_pages_locked(obj);
	msm_gem_unlock(obj);

	return p;
}

void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_unpin_locked(obj);
	msm_gem_unlock(obj);
}

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & MSM_BO_WC)
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	msm_gem_assert_locked(obj);

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);

	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = msm_gem_vma_new(aspace);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_vma_purge(vma);
			if (close)
				msm_gem_vma_close(vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	msm_gem_assert_locked(obj);

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_vma_init(vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	pages = msm_gem_pin_pages_locked(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
	if (ret)
		msm_gem_unpin_locked(obj);

	return ret;
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);

	update_lru(obj);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					   struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	msm_gem_assert_locked(obj);

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret)
		*iova = vma->iova;

	return ret;
}

/*
 * get iova and pin it.
 * Should have a matching put.
 * Limits iova to specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are
 * currently valid for the life of the object.
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		      struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_vma_purge(vma);
	msm_gem_vma_close(vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it. Fails if the requested iova is
 * not available. Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		     struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;

		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Unpin an iova by updating the reference counts.
 * The memory isn't actually purged until something else (shrinker,
 * mm_notifier, destroy, etc) decides to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_vma_unpin(vma);
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret = 0;

	msm_gem_assert_locked(obj);

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	pages = msm_gem_pin_pages_locked(obj);
	if (IS_ERR(pages))
		return ERR_CAST(pages);

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	update_lru(obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(is_unevictable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

static void update_lru(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(&msm_obj->base);

	if (!msm_obj->pages) {
		GEM_WARN_ON(msm_obj->pin_count);

		drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
	} else if (msm_obj->pin_count) {
		drm_gem_lru_move_tail(&priv->lru.pinned, obj);
	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
		drm_gem_lru_move_tail(&priv->lru.willneed, obj);
	} else {
		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);

		drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
	}
}

bool msm_gem_active(struct drm_gem_object *obj)
{
	msm_gem_assert_locked(obj);

	if (to_msm_bo(obj)->pin_count)
		return true;

	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (msm_gem_active(obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;

			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				msm_gem_vma_inuse(vma));
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);
	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;

		seq_puts(m, " ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total: %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active: %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

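/*
 * Allocate and return a new GEM buffer object.  Backing pages come from
 * shmem, or from the VRAM carveout when there is no IOMMU (or for
 * stolen/scanout buffers on devices with a carveout configured).
 */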
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	drm_gem_lru_move_tail(&priv->lru.pinned, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

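/*
 * Convenience wrapper for kernel-internal buffers: allocates a GEM object,
 * pins it at an iova in @aspace (when @iova is non-NULL), and maps it into
 * the kernel address space.  Use msm_gem_kernel_put() to undo all of this.
 */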
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}