// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_lru(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & MSM_BO_WC)
			sync_for_device(msm_obj);

		update_lru(obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & MSM_BO_WC)
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	if (!IS_ERR(p)) {
		to_msm_bo(obj)->pin_count++;
		update_lru(obj);
	}

	return p;
}

struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
	struct page **p;

	msm_gem_lock(obj);
	p = msm_gem_pin_pages_locked(obj);
	msm_gem_unlock(obj);

	return p;
}

void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_unpin_locked(obj);
	msm_gem_unlock(obj);
}
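
/*
 * Illustrative pin/unpin pairing (a sketch only, not an actual call site
 * in this driver): pin the backing pages while they are being accessed,
 * then drop the pin so the shrinker can reclaim them again.
 *
 *	struct page **pages = msm_gem_pin_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... access the backing pages ...
 *	msm_gem_unpin_pages(obj);
 */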

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & MSM_BO_WC)
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/* get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_init_vma(aspace, vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	pages = msm_gem_pin_pages_locked(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
	if (ret)
		msm_gem_unpin_locked(obj);

	return ret;
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);

	update_lru(obj);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					   struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret)
		*iova = vma->iova;

	return ret;
}

/*
 * Get an iova and pin it. Should have a matching put. Limits the iova
 * to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		      struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_purge_vma(vma->aspace, vma);
	msm_gem_close_vma(vma->aspace, vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it. Fails if the requested iova is
 * not available. Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		     struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;
		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
			struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unpin_vma(vma);
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}
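
/*
 * Illustrative get/put pairing (a sketch only, not an actual call site):
 * pin an iova for device access and unpin it once the hardware no longer
 * needs the mapping.
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... hand iova to the hardware ...
 *	msm_gem_unpin_iova(obj, aspace);
 */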

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_lru(obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}
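
/*
 * Illustrative vmap pairing (a sketch only, not an actual call site):
 * map the object into the kernel address space for CPU access, then
 * drop the mapping reference when done.
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... CPU access through vaddr ...
 *	msm_gem_put_vaddr(obj);
 */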

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	update_lru(obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_lru(obj);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as possible
	 * back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}
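
/*
 * Note: unlike msm_gem_purge(), eviction calls put_iova_spaces() with
 * close=false, so the iova allocations are kept; only the iommu mappings
 * and backing pages are dropped, and the pages can be re-populated the
 * next time the object is pinned.
 */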

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(is_unevictable(msm_obj));
	GEM_WARN_ON(!msm_obj->evictable);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	update_lru(obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

static void update_lru(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}

bool msm_gem_active(struct drm_gem_object *obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (to_msm_bo(obj)->pin_count)
		return true;

	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
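
/*
 * Illustrative prep/fini pairing (a sketch only, not an actual call site):
 * wait for pending GPU access before touching the buffer from the CPU,
 * then signal that CPU access is finished.
 *
 *	int ret = msm_gem_cpu_prep(obj, MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads/writes the buffer ...
 *	msm_gem_cpu_fini(obj);
 */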

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (msm_gem_active(obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				msm_gem_vma_inuse(vma));
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);
	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero-sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		/* Call chain get_pages() -> update_lru() tries to
		 * access msm_obj->mm_list, but it is not initialized yet.
		 * To avoid NULL pointer dereference error, initialize
		 * mm_list to be empty.
		 */
		INIT_LIST_HEAD(&msm_obj->mm_list);

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() for why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}
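
/*
 * Illustrative usage of the msm_gem_kernel_new()/msm_gem_kernel_put() pair
 * below (a sketch only, not an actual call site): allocate a kernel-internal
 * buffer that comes back pinned and vmap'd, and release everything in one
 * call when done.
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... use vaddr and iova ...
 *	msm_gem_kernel_put(bo, aspace);
 */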

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}