// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

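/* Lazily allocate and attach backing pages (shmem pages, or a VRAM
 * carveout allocation when there is no IOMMU), and build the sg_table
 * used for device mappings.  Caller must hold the object lock.
 */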
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);

		update_inactive(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);

	if (!IS_ERR(p)) {
		msm_obj->pin_count++;
		update_inactive(msm_obj);
	}

	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_inactive(msm_obj);
	msm_gem_unlock(obj);
}

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
		return pgprot_writecombine(prot);
	return prot;
}

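/* Page fault handler for userspace mmap()s of the object: look up the
 * backing page for the faulting address and insert it into the VMA,
 * under the object lock so we can't race with purge/madvise.
 */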
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (GEM_WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);

	if (!ret)
		msm_obj->pin_count++;

	return ret;
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	GEM_WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unmap_vma(aspace, vma);

		msm_obj->pin_count--;
		GEM_WARN_ON(msm_obj->pin_count < 0);

		update_inactive(msm_obj);
	}
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

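/*
 * Illustrative usage of the pin/unpin helpers above (not a function in
 * this file): callers pin the iova for as long as the hw may access the
 * buffer, and drop it with a matching unpin, e.g.:
 *
 *	uint64_t iova;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (!ret) {
 *		... program iova into the hw / cmdstream ...
 *		msm_gem_unpin_iova(obj, aspace);
 *	}
 */
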
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_inactive(msm_obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

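/* Free the backing pages of a purgeable (MSM_MADV_DONTNEED) object and
 * drop its shmem backing store, so the memory goes back to the system
 * immediately.  Called with the object locked, e.g. from the shrinker.
 */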
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_inactive(msm_obj);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(is_unevictable(msm_obj));
	GEM_WARN_ON(!msm_obj->evictable);
	GEM_WARN_ON(msm_obj->active_count);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	update_inactive(msm_obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		if (msm_obj->evictable)
			mark_unevictable(msm_obj);
		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}

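/* Re-file an inactive object on the list matching its current state:
 * willneed (evictable), dontneed (purgeable), or unpinned/purged.
 * No-op while the object is still active.
 */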
static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	if (msm_obj->active_count != 0)
		return;

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (is_active(msm_obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);
	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, " ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total: %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active: %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	GEM_WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

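/* mmap hook: userspace mappings are VM_MIXEDMAP and populated lazily by
 * msm_gem_fault(), with the page protection derived from the object's
 * caching flags.
 */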
static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};

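/* Common allocation path shared by shmem-backed, VRAM and imported
 * objects: validate the cache flags and allocate the msm_gem_object;
 * callers finish GEM object init and list bookkeeping.
 */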
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		/* Call chain get_pages() -> update_inactive() tries to
		 * access msm_obj->mm_list, but it is not initialized yet.
		 * To avoid NULL pointer dereference error, initialize
		 * mm_list to be empty.
		 */
		INIT_LIST_HEAD(&msm_obj->mm_list);

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

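/* Convenience allocator for kernel-internal buffers: create the BO, pin
 * an iova in @aspace (if @iova is non-NULL), and return the kernel vaddr.
 * Release with msm_gem_kernel_put().
 */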
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}