1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2013 Red Hat 4 * Author: Rob Clark <robdclark@gmail.com> 5 */ 6 7 #include <linux/dma-map-ops.h> 8 #include <linux/spinlock.h> 9 #include <linux/shmem_fs.h> 10 #include <linux/dma-buf.h> 11 #include <linux/pfn_t.h> 12 13 #include <drm/drm_prime.h> 14 15 #include "msm_drv.h" 16 #include "msm_fence.h" 17 #include "msm_gem.h" 18 #include "msm_gpu.h" 19 #include "msm_mmu.h" 20 21 static void update_inactive(struct msm_gem_object *msm_obj); 22 23 static dma_addr_t physaddr(struct drm_gem_object *obj) 24 { 25 struct msm_gem_object *msm_obj = to_msm_bo(obj); 26 struct msm_drm_private *priv = obj->dev->dev_private; 27 return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) + 28 priv->vram.paddr; 29 } 30 31 static bool use_pages(struct drm_gem_object *obj) 32 { 33 struct msm_gem_object *msm_obj = to_msm_bo(obj); 34 return !msm_obj->vram_node; 35 } 36 37 /* 38 * Cache sync.. this is a bit over-complicated, to fit dma-mapping 39 * API. Really GPU cache is out of scope here (handled on cmdstream) 40 * and all we need to do is invalidate newly allocated pages before 41 * mapping to CPU as uncached/writecombine. 42 * 43 * On top of this, we have the added headache, that depending on 44 * display generation, the display's iommu may be wired up to either 45 * the toplevel drm device (mdss), or to the mdp sub-node, meaning 46 * that here we either have dma-direct or iommu ops. 47 * 48 * Let this be a cautionary tail of abstraction gone wrong. 49 */ 50 51 static void sync_for_device(struct msm_gem_object *msm_obj) 52 { 53 struct device *dev = msm_obj->base.dev->dev; 54 55 dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); 56 } 57 58 static void sync_for_cpu(struct msm_gem_object *msm_obj) 59 { 60 struct device *dev = msm_obj->base.dev->dev; 61 62 dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); 63 } 64 65 /* allocate pages from VRAM carveout, used when no IOMMU: */ 66 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) 67 { 68 struct msm_gem_object *msm_obj = to_msm_bo(obj); 69 struct msm_drm_private *priv = obj->dev->dev_private; 70 dma_addr_t paddr; 71 struct page **p; 72 int ret, i; 73 74 p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 75 if (!p) 76 return ERR_PTR(-ENOMEM); 77 78 spin_lock(&priv->vram.lock); 79 ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages); 80 spin_unlock(&priv->vram.lock); 81 if (ret) { 82 kvfree(p); 83 return ERR_PTR(ret); 84 } 85 86 paddr = physaddr(obj); 87 for (i = 0; i < npages; i++) { 88 p[i] = pfn_to_page(__phys_to_pfn(paddr)); 89 paddr += PAGE_SIZE; 90 } 91 92 return p; 93 } 94 95 static struct page **get_pages(struct drm_gem_object *obj) 96 { 97 struct msm_gem_object *msm_obj = to_msm_bo(obj); 98 99 GEM_WARN_ON(!msm_gem_is_locked(obj)); 100 101 if (!msm_obj->pages) { 102 struct drm_device *dev = obj->dev; 103 struct page **p; 104 int npages = obj->size >> PAGE_SHIFT; 105 106 if (use_pages(obj)) 107 p = drm_gem_get_pages(obj); 108 else 109 p = get_pages_vram(obj, npages); 110 111 if (IS_ERR(p)) { 112 DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n", 113 PTR_ERR(p)); 114 return p; 115 } 116 117 msm_obj->pages = p; 118 119 msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages); 120 if (IS_ERR(msm_obj->sgt)) { 121 void *ptr = ERR_CAST(msm_obj->sgt); 122 123 DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n"); 124 msm_obj->sgt = NULL; 125 return ptr; 126 } 127 128 /* For non-cached buffers, ensure the new pages are clean 129 * because display controller, GPU, etc. are not coherent: 130 */ 131 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 132 sync_for_device(msm_obj); 133 134 update_inactive(msm_obj); 135 } 136 137 return msm_obj->pages; 138 } 139 140 static void put_pages_vram(struct drm_gem_object *obj) 141 { 142 struct msm_gem_object *msm_obj = to_msm_bo(obj); 143 struct msm_drm_private *priv = obj->dev->dev_private; 144 145 spin_lock(&priv->vram.lock); 146 drm_mm_remove_node(msm_obj->vram_node); 147 spin_unlock(&priv->vram.lock); 148 149 kvfree(msm_obj->pages); 150 } 151 152 static void put_pages(struct drm_gem_object *obj) 153 { 154 struct msm_gem_object *msm_obj = to_msm_bo(obj); 155 156 if (msm_obj->pages) { 157 if (msm_obj->sgt) { 158 /* For non-cached buffers, ensure the new 159 * pages are clean because display controller, 160 * GPU, etc. are not coherent: 161 */ 162 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 163 sync_for_cpu(msm_obj); 164 165 sg_free_table(msm_obj->sgt); 166 kfree(msm_obj->sgt); 167 msm_obj->sgt = NULL; 168 } 169 170 if (use_pages(obj)) 171 drm_gem_put_pages(obj, msm_obj->pages, true, false); 172 else 173 put_pages_vram(obj); 174 175 msm_obj->pages = NULL; 176 } 177 } 178 179 struct page **msm_gem_get_pages(struct drm_gem_object *obj) 180 { 181 struct msm_gem_object *msm_obj = to_msm_bo(obj); 182 struct page **p; 183 184 msm_gem_lock(obj); 185 186 if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { 187 msm_gem_unlock(obj); 188 return ERR_PTR(-EBUSY); 189 } 190 191 p = get_pages(obj); 192 193 if (!IS_ERR(p)) { 194 msm_obj->pin_count++; 195 update_inactive(msm_obj); 196 } 197 198 msm_gem_unlock(obj); 199 return p; 200 } 201 202 void msm_gem_put_pages(struct drm_gem_object *obj) 203 { 204 struct msm_gem_object *msm_obj = to_msm_bo(obj); 205 206 msm_gem_lock(obj); 207 msm_obj->pin_count--; 208 GEM_WARN_ON(msm_obj->pin_count < 0); 209 update_inactive(msm_obj); 210 msm_gem_unlock(obj); 211 } 212 213 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot) 214 { 215 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 216 return pgprot_writecombine(prot); 217 return prot; 218 } 219 220 static vm_fault_t msm_gem_fault(struct vm_fault *vmf) 221 { 222 struct vm_area_struct *vma = vmf->vma; 223 struct drm_gem_object *obj = vma->vm_private_data; 224 struct msm_gem_object *msm_obj = to_msm_bo(obj); 225 struct page **pages; 226 unsigned long pfn; 227 pgoff_t pgoff; 228 int err; 229 vm_fault_t ret; 230 231 /* 232 * vm_ops.open/drm_gem_mmap_obj and close get and put 233 * a reference on obj. So, we dont need to hold one here. 234 */ 235 err = msm_gem_lock_interruptible(obj); 236 if (err) { 237 ret = VM_FAULT_NOPAGE; 238 goto out; 239 } 240 241 if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { 242 msm_gem_unlock(obj); 243 return VM_FAULT_SIGBUS; 244 } 245 246 /* make sure we have pages attached now */ 247 pages = get_pages(obj); 248 if (IS_ERR(pages)) { 249 ret = vmf_error(PTR_ERR(pages)); 250 goto out_unlock; 251 } 252 253 /* We don't use vmf->pgoff since that has the fake offset: */ 254 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; 255 256 pfn = page_to_pfn(pages[pgoff]); 257 258 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, 259 pfn, pfn << PAGE_SHIFT); 260 261 ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); 262 out_unlock: 263 msm_gem_unlock(obj); 264 out: 265 return ret; 266 } 267 268 /** get mmap offset */ 269 static uint64_t mmap_offset(struct drm_gem_object *obj) 270 { 271 struct drm_device *dev = obj->dev; 272 int ret; 273 274 GEM_WARN_ON(!msm_gem_is_locked(obj)); 275 276 /* Make it mmapable */ 277 ret = drm_gem_create_mmap_offset(obj); 278 279 if (ret) { 280 DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n"); 281 return 0; 282 } 283 284 return drm_vma_node_offset_addr(&obj->vma_node); 285 } 286 287 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) 288 { 289 uint64_t offset; 290 291 msm_gem_lock(obj); 292 offset = mmap_offset(obj); 293 msm_gem_unlock(obj); 294 return offset; 295 } 296 297 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj, 298 struct msm_gem_address_space *aspace) 299 { 300 struct msm_gem_object *msm_obj = to_msm_bo(obj); 301 struct msm_gem_vma *vma; 302 303 GEM_WARN_ON(!msm_gem_is_locked(obj)); 304 305 vma = kzalloc(sizeof(*vma), GFP_KERNEL); 306 if (!vma) 307 return ERR_PTR(-ENOMEM); 308 309 vma->aspace = aspace; 310 311 list_add_tail(&vma->list, &msm_obj->vmas); 312 313 return vma; 314 } 315 316 static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, 317 struct msm_gem_address_space *aspace) 318 { 319 struct msm_gem_object *msm_obj = to_msm_bo(obj); 320 struct msm_gem_vma *vma; 321 322 GEM_WARN_ON(!msm_gem_is_locked(obj)); 323 324 list_for_each_entry(vma, &msm_obj->vmas, list) { 325 if (vma->aspace == aspace) 326 return vma; 327 } 328 329 return NULL; 330 } 331 332 static void del_vma(struct msm_gem_vma *vma) 333 { 334 if (!vma) 335 return; 336 337 list_del(&vma->list); 338 kfree(vma); 339 } 340 341 /* 342 * If close is true, this also closes the VMA (releasing the allocated 343 * iova range) in addition to removing the iommu mapping. In the eviction 344 * case (!close), we keep the iova allocated, but only remove the iommu 345 * mapping. 346 */ 347 static void 348 put_iova_spaces(struct drm_gem_object *obj, bool close) 349 { 350 struct msm_gem_object *msm_obj = to_msm_bo(obj); 351 struct msm_gem_vma *vma; 352 353 GEM_WARN_ON(!msm_gem_is_locked(obj)); 354 355 list_for_each_entry(vma, &msm_obj->vmas, list) { 356 if (vma->aspace) { 357 msm_gem_purge_vma(vma->aspace, vma); 358 if (close) 359 msm_gem_close_vma(vma->aspace, vma); 360 } 361 } 362 } 363 364 /* Called with msm_obj locked */ 365 static void 366 put_iova_vmas(struct drm_gem_object *obj) 367 { 368 struct msm_gem_object *msm_obj = to_msm_bo(obj); 369 struct msm_gem_vma *vma, *tmp; 370 371 GEM_WARN_ON(!msm_gem_is_locked(obj)); 372 373 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { 374 del_vma(vma); 375 } 376 } 377 378 static int get_iova_locked(struct drm_gem_object *obj, 379 struct msm_gem_address_space *aspace, uint64_t *iova, 380 u64 range_start, u64 range_end) 381 { 382 struct msm_gem_vma *vma; 383 int ret = 0; 384 385 GEM_WARN_ON(!msm_gem_is_locked(obj)); 386 387 vma = lookup_vma(obj, aspace); 388 389 if (!vma) { 390 vma = add_vma(obj, aspace); 391 if (IS_ERR(vma)) 392 return PTR_ERR(vma); 393 394 ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT, 395 range_start, range_end); 396 if (ret) { 397 del_vma(vma); 398 return ret; 399 } 400 } 401 402 *iova = vma->iova; 403 return 0; 404 } 405 406 static int msm_gem_pin_iova(struct drm_gem_object *obj, 407 struct msm_gem_address_space *aspace) 408 { 409 struct msm_gem_object *msm_obj = to_msm_bo(obj); 410 struct msm_gem_vma *vma; 411 struct page **pages; 412 int ret, prot = IOMMU_READ; 413 414 if (!(msm_obj->flags & MSM_BO_GPU_READONLY)) 415 prot |= IOMMU_WRITE; 416 417 if (msm_obj->flags & MSM_BO_MAP_PRIV) 418 prot |= IOMMU_PRIV; 419 420 if (msm_obj->flags & MSM_BO_CACHED_COHERENT) 421 prot |= IOMMU_CACHE; 422 423 GEM_WARN_ON(!msm_gem_is_locked(obj)); 424 425 if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) 426 return -EBUSY; 427 428 vma = lookup_vma(obj, aspace); 429 if (GEM_WARN_ON(!vma)) 430 return -EINVAL; 431 432 pages = get_pages(obj); 433 if (IS_ERR(pages)) 434 return PTR_ERR(pages); 435 436 ret = msm_gem_map_vma(aspace, vma, prot, 437 msm_obj->sgt, obj->size >> PAGE_SHIFT); 438 439 if (!ret) 440 msm_obj->pin_count++; 441 442 return ret; 443 } 444 445 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj, 446 struct msm_gem_address_space *aspace, uint64_t *iova, 447 u64 range_start, u64 range_end) 448 { 449 u64 local; 450 int ret; 451 452 GEM_WARN_ON(!msm_gem_is_locked(obj)); 453 454 ret = get_iova_locked(obj, aspace, &local, 455 range_start, range_end); 456 457 if (!ret) 458 ret = msm_gem_pin_iova(obj, aspace); 459 460 if (!ret) 461 *iova = local; 462 463 return ret; 464 } 465 466 /* 467 * get iova and pin it. Should have a matching put 468 * limits iova to specified range (in pages) 469 */ 470 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, 471 struct msm_gem_address_space *aspace, uint64_t *iova, 472 u64 range_start, u64 range_end) 473 { 474 int ret; 475 476 msm_gem_lock(obj); 477 ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end); 478 msm_gem_unlock(obj); 479 480 return ret; 481 } 482 483 int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj, 484 struct msm_gem_address_space *aspace, uint64_t *iova) 485 { 486 return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX); 487 } 488 489 /* get iova and pin it. Should have a matching put */ 490 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, 491 struct msm_gem_address_space *aspace, uint64_t *iova) 492 { 493 return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX); 494 } 495 496 /* 497 * Get an iova but don't pin it. Doesn't need a put because iovas are currently 498 * valid for the life of the object 499 */ 500 int msm_gem_get_iova(struct drm_gem_object *obj, 501 struct msm_gem_address_space *aspace, uint64_t *iova) 502 { 503 int ret; 504 505 msm_gem_lock(obj); 506 ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX); 507 msm_gem_unlock(obj); 508 509 return ret; 510 } 511 512 /* get iova without taking a reference, used in places where you have 513 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova' 514 */ 515 uint64_t msm_gem_iova(struct drm_gem_object *obj, 516 struct msm_gem_address_space *aspace) 517 { 518 struct msm_gem_vma *vma; 519 520 msm_gem_lock(obj); 521 vma = lookup_vma(obj, aspace); 522 msm_gem_unlock(obj); 523 GEM_WARN_ON(!vma); 524 525 return vma ? vma->iova : 0; 526 } 527 528 /* 529 * Locked variant of msm_gem_unpin_iova() 530 */ 531 void msm_gem_unpin_iova_locked(struct drm_gem_object *obj, 532 struct msm_gem_address_space *aspace) 533 { 534 struct msm_gem_object *msm_obj = to_msm_bo(obj); 535 struct msm_gem_vma *vma; 536 537 GEM_WARN_ON(!msm_gem_is_locked(obj)); 538 539 vma = lookup_vma(obj, aspace); 540 541 if (!GEM_WARN_ON(!vma)) { 542 msm_gem_unmap_vma(aspace, vma); 543 544 msm_obj->pin_count--; 545 GEM_WARN_ON(msm_obj->pin_count < 0); 546 547 update_inactive(msm_obj); 548 } 549 } 550 551 /* 552 * Unpin a iova by updating the reference counts. The memory isn't actually 553 * purged until something else (shrinker, mm_notifier, destroy, etc) decides 554 * to get rid of it 555 */ 556 void msm_gem_unpin_iova(struct drm_gem_object *obj, 557 struct msm_gem_address_space *aspace) 558 { 559 msm_gem_lock(obj); 560 msm_gem_unpin_iova_locked(obj, aspace); 561 msm_gem_unlock(obj); 562 } 563 564 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 565 struct drm_mode_create_dumb *args) 566 { 567 args->pitch = align_pitch(args->width, args->bpp); 568 args->size = PAGE_ALIGN(args->pitch * args->height); 569 return msm_gem_new_handle(dev, file, args->size, 570 MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb"); 571 } 572 573 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 574 uint32_t handle, uint64_t *offset) 575 { 576 struct drm_gem_object *obj; 577 int ret = 0; 578 579 /* GEM does all our handle to object mapping */ 580 obj = drm_gem_object_lookup(file, handle); 581 if (obj == NULL) { 582 ret = -ENOENT; 583 goto fail; 584 } 585 586 *offset = msm_gem_mmap_offset(obj); 587 588 drm_gem_object_put(obj); 589 590 fail: 591 return ret; 592 } 593 594 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) 595 { 596 struct msm_gem_object *msm_obj = to_msm_bo(obj); 597 int ret = 0; 598 599 GEM_WARN_ON(!msm_gem_is_locked(obj)); 600 601 if (obj->import_attach) 602 return ERR_PTR(-ENODEV); 603 604 if (GEM_WARN_ON(msm_obj->madv > madv)) { 605 DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n", 606 msm_obj->madv, madv); 607 return ERR_PTR(-EBUSY); 608 } 609 610 /* increment vmap_count *before* vmap() call, so shrinker can 611 * check vmap_count (is_vunmapable()) outside of msm_obj lock. 612 * This guarantees that we won't try to msm_gem_vunmap() this 613 * same object from within the vmap() call (while we already 614 * hold msm_obj lock) 615 */ 616 msm_obj->vmap_count++; 617 618 if (!msm_obj->vaddr) { 619 struct page **pages = get_pages(obj); 620 if (IS_ERR(pages)) { 621 ret = PTR_ERR(pages); 622 goto fail; 623 } 624 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, 625 VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL)); 626 if (msm_obj->vaddr == NULL) { 627 ret = -ENOMEM; 628 goto fail; 629 } 630 631 update_inactive(msm_obj); 632 } 633 634 return msm_obj->vaddr; 635 636 fail: 637 msm_obj->vmap_count--; 638 return ERR_PTR(ret); 639 } 640 641 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj) 642 { 643 return get_vaddr(obj, MSM_MADV_WILLNEED); 644 } 645 646 void *msm_gem_get_vaddr(struct drm_gem_object *obj) 647 { 648 void *ret; 649 650 msm_gem_lock(obj); 651 ret = msm_gem_get_vaddr_locked(obj); 652 msm_gem_unlock(obj); 653 654 return ret; 655 } 656 657 /* 658 * Don't use this! It is for the very special case of dumping 659 * submits from GPU hangs or faults, were the bo may already 660 * be MSM_MADV_DONTNEED, but we know the buffer is still on the 661 * active list. 662 */ 663 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj) 664 { 665 return get_vaddr(obj, __MSM_MADV_PURGED); 666 } 667 668 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj) 669 { 670 struct msm_gem_object *msm_obj = to_msm_bo(obj); 671 672 GEM_WARN_ON(!msm_gem_is_locked(obj)); 673 GEM_WARN_ON(msm_obj->vmap_count < 1); 674 675 msm_obj->vmap_count--; 676 } 677 678 void msm_gem_put_vaddr(struct drm_gem_object *obj) 679 { 680 msm_gem_lock(obj); 681 msm_gem_put_vaddr_locked(obj); 682 msm_gem_unlock(obj); 683 } 684 685 /* Update madvise status, returns true if not purged, else 686 * false or -errno. 687 */ 688 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv) 689 { 690 struct msm_gem_object *msm_obj = to_msm_bo(obj); 691 692 msm_gem_lock(obj); 693 694 if (msm_obj->madv != __MSM_MADV_PURGED) 695 msm_obj->madv = madv; 696 697 madv = msm_obj->madv; 698 699 /* If the obj is inactive, we might need to move it 700 * between inactive lists 701 */ 702 if (msm_obj->active_count == 0) 703 update_inactive(msm_obj); 704 705 msm_gem_unlock(obj); 706 707 return (madv != __MSM_MADV_PURGED); 708 } 709 710 void msm_gem_purge(struct drm_gem_object *obj) 711 { 712 struct drm_device *dev = obj->dev; 713 struct msm_gem_object *msm_obj = to_msm_bo(obj); 714 715 GEM_WARN_ON(!msm_gem_is_locked(obj)); 716 GEM_WARN_ON(!is_purgeable(msm_obj)); 717 718 /* Get rid of any iommu mapping(s): */ 719 put_iova_spaces(obj, true); 720 721 msm_gem_vunmap(obj); 722 723 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); 724 725 put_pages(obj); 726 727 put_iova_vmas(obj); 728 729 msm_obj->madv = __MSM_MADV_PURGED; 730 update_inactive(msm_obj); 731 732 drm_gem_free_mmap_offset(obj); 733 734 /* Our goal here is to return as much of the memory as 735 * is possible back to the system as we are called from OOM. 736 * To do this we must instruct the shmfs to drop all of its 737 * backing pages, *now*. 738 */ 739 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); 740 741 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 742 0, (loff_t)-1); 743 } 744 745 /* 746 * Unpin the backing pages and make them available to be swapped out. 747 */ 748 void msm_gem_evict(struct drm_gem_object *obj) 749 { 750 struct drm_device *dev = obj->dev; 751 struct msm_gem_object *msm_obj = to_msm_bo(obj); 752 753 GEM_WARN_ON(!msm_gem_is_locked(obj)); 754 GEM_WARN_ON(is_unevictable(msm_obj)); 755 GEM_WARN_ON(!msm_obj->evictable); 756 GEM_WARN_ON(msm_obj->active_count); 757 758 /* Get rid of any iommu mapping(s): */ 759 put_iova_spaces(obj, false); 760 761 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); 762 763 put_pages(obj); 764 765 update_inactive(msm_obj); 766 } 767 768 void msm_gem_vunmap(struct drm_gem_object *obj) 769 { 770 struct msm_gem_object *msm_obj = to_msm_bo(obj); 771 772 GEM_WARN_ON(!msm_gem_is_locked(obj)); 773 774 if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj))) 775 return; 776 777 vunmap(msm_obj->vaddr); 778 msm_obj->vaddr = NULL; 779 } 780 781 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu) 782 { 783 struct msm_gem_object *msm_obj = to_msm_bo(obj); 784 struct msm_drm_private *priv = obj->dev->dev_private; 785 786 might_sleep(); 787 GEM_WARN_ON(!msm_gem_is_locked(obj)); 788 GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); 789 GEM_WARN_ON(msm_obj->dontneed); 790 791 if (msm_obj->active_count++ == 0) { 792 mutex_lock(&priv->mm_lock); 793 if (msm_obj->evictable) 794 mark_unevictable(msm_obj); 795 list_move_tail(&msm_obj->mm_list, &gpu->active_list); 796 mutex_unlock(&priv->mm_lock); 797 } 798 } 799 800 void msm_gem_active_put(struct drm_gem_object *obj) 801 { 802 struct msm_gem_object *msm_obj = to_msm_bo(obj); 803 804 might_sleep(); 805 GEM_WARN_ON(!msm_gem_is_locked(obj)); 806 807 if (--msm_obj->active_count == 0) { 808 update_inactive(msm_obj); 809 } 810 } 811 812 static void update_inactive(struct msm_gem_object *msm_obj) 813 { 814 struct msm_drm_private *priv = msm_obj->base.dev->dev_private; 815 816 GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base)); 817 818 if (msm_obj->active_count != 0) 819 return; 820 821 mutex_lock(&priv->mm_lock); 822 823 if (msm_obj->dontneed) 824 mark_unpurgeable(msm_obj); 825 if (msm_obj->evictable) 826 mark_unevictable(msm_obj); 827 828 list_del(&msm_obj->mm_list); 829 if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) { 830 list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); 831 mark_evictable(msm_obj); 832 } else if (msm_obj->madv == MSM_MADV_DONTNEED) { 833 list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed); 834 mark_purgeable(msm_obj); 835 } else { 836 GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt); 837 list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned); 838 } 839 840 mutex_unlock(&priv->mm_lock); 841 } 842 843 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) 844 { 845 bool write = !!(op & MSM_PREP_WRITE); 846 unsigned long remain = 847 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); 848 long ret; 849 850 ret = dma_resv_wait_timeout(obj->resv, write, true, remain); 851 if (ret == 0) 852 return remain == 0 ? -EBUSY : -ETIMEDOUT; 853 else if (ret < 0) 854 return ret; 855 856 /* TODO cache maintenance */ 857 858 return 0; 859 } 860 861 int msm_gem_cpu_fini(struct drm_gem_object *obj) 862 { 863 /* TODO cache maintenance */ 864 return 0; 865 } 866 867 #ifdef CONFIG_DEBUG_FS 868 static void describe_fence(struct dma_fence *fence, const char *type, 869 struct seq_file *m) 870 { 871 if (!dma_fence_is_signaled(fence)) 872 seq_printf(m, "\t%9s: %s %s seq %llu\n", type, 873 fence->ops->get_driver_name(fence), 874 fence->ops->get_timeline_name(fence), 875 fence->seqno); 876 } 877 878 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, 879 struct msm_gem_stats *stats) 880 { 881 struct msm_gem_object *msm_obj = to_msm_bo(obj); 882 struct dma_resv *robj = obj->resv; 883 struct dma_resv_iter cursor; 884 struct dma_fence *fence; 885 struct msm_gem_vma *vma; 886 uint64_t off = drm_vma_node_start(&obj->vma_node); 887 const char *madv; 888 889 msm_gem_lock(obj); 890 891 stats->all.count++; 892 stats->all.size += obj->size; 893 894 if (is_active(msm_obj)) { 895 stats->active.count++; 896 stats->active.size += obj->size; 897 } 898 899 if (msm_obj->pages) { 900 stats->resident.count++; 901 stats->resident.size += obj->size; 902 } 903 904 switch (msm_obj->madv) { 905 case __MSM_MADV_PURGED: 906 stats->purged.count++; 907 stats->purged.size += obj->size; 908 madv = " purged"; 909 break; 910 case MSM_MADV_DONTNEED: 911 stats->purgeable.count++; 912 stats->purgeable.size += obj->size; 913 madv = " purgeable"; 914 break; 915 case MSM_MADV_WILLNEED: 916 default: 917 madv = ""; 918 break; 919 } 920 921 seq_printf(m, "%08x: %c %2d (%2d) %08llx %p", 922 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', 923 obj->name, kref_read(&obj->refcount), 924 off, msm_obj->vaddr); 925 926 seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name); 927 928 if (!list_empty(&msm_obj->vmas)) { 929 930 seq_puts(m, " vmas:"); 931 932 list_for_each_entry(vma, &msm_obj->vmas, list) { 933 const char *name, *comm; 934 if (vma->aspace) { 935 struct msm_gem_address_space *aspace = vma->aspace; 936 struct task_struct *task = 937 get_pid_task(aspace->pid, PIDTYPE_PID); 938 if (task) { 939 comm = kstrdup(task->comm, GFP_KERNEL); 940 } else { 941 comm = NULL; 942 } 943 name = aspace->name; 944 } else { 945 name = comm = NULL; 946 } 947 seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]", 948 name, comm ? ":" : "", comm ? comm : "", 949 vma->aspace, vma->iova, 950 vma->mapped ? "mapped" : "unmapped", 951 vma->inuse); 952 kfree(comm); 953 } 954 955 seq_puts(m, "\n"); 956 } 957 958 dma_resv_for_each_fence(&cursor, robj, true, fence) { 959 if (dma_resv_iter_is_exclusive(&cursor)) 960 describe_fence(fence, "Exclusive", m); 961 else 962 describe_fence(fence, "Shared", m); 963 } 964 965 msm_gem_unlock(obj); 966 } 967 968 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) 969 { 970 struct msm_gem_stats stats = {}; 971 struct msm_gem_object *msm_obj; 972 973 seq_puts(m, " flags id ref offset kaddr size madv name\n"); 974 list_for_each_entry(msm_obj, list, node) { 975 struct drm_gem_object *obj = &msm_obj->base; 976 seq_puts(m, " "); 977 msm_gem_describe(obj, m, &stats); 978 } 979 980 seq_printf(m, "Total: %4d objects, %9zu bytes\n", 981 stats.all.count, stats.all.size); 982 seq_printf(m, "Active: %4d objects, %9zu bytes\n", 983 stats.active.count, stats.active.size); 984 seq_printf(m, "Resident: %4d objects, %9zu bytes\n", 985 stats.resident.count, stats.resident.size); 986 seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n", 987 stats.purgeable.count, stats.purgeable.size); 988 seq_printf(m, "Purged: %4d objects, %9zu bytes\n", 989 stats.purged.count, stats.purged.size); 990 } 991 #endif 992 993 /* don't call directly! Use drm_gem_object_put() */ 994 void msm_gem_free_object(struct drm_gem_object *obj) 995 { 996 struct msm_gem_object *msm_obj = to_msm_bo(obj); 997 struct drm_device *dev = obj->dev; 998 struct msm_drm_private *priv = dev->dev_private; 999 1000 mutex_lock(&priv->obj_lock); 1001 list_del(&msm_obj->node); 1002 mutex_unlock(&priv->obj_lock); 1003 1004 mutex_lock(&priv->mm_lock); 1005 if (msm_obj->dontneed) 1006 mark_unpurgeable(msm_obj); 1007 list_del(&msm_obj->mm_list); 1008 mutex_unlock(&priv->mm_lock); 1009 1010 msm_gem_lock(obj); 1011 1012 /* object should not be on active list: */ 1013 GEM_WARN_ON(is_active(msm_obj)); 1014 1015 put_iova_spaces(obj, true); 1016 1017 if (obj->import_attach) { 1018 GEM_WARN_ON(msm_obj->vaddr); 1019 1020 /* Don't drop the pages for imported dmabuf, as they are not 1021 * ours, just free the array we allocated: 1022 */ 1023 kvfree(msm_obj->pages); 1024 1025 put_iova_vmas(obj); 1026 1027 /* dma_buf_detach() grabs resv lock, so we need to unlock 1028 * prior to drm_prime_gem_destroy 1029 */ 1030 msm_gem_unlock(obj); 1031 1032 drm_prime_gem_destroy(obj, msm_obj->sgt); 1033 } else { 1034 msm_gem_vunmap(obj); 1035 put_pages(obj); 1036 put_iova_vmas(obj); 1037 msm_gem_unlock(obj); 1038 } 1039 1040 drm_gem_object_release(obj); 1041 1042 kfree(msm_obj); 1043 } 1044 1045 static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 1046 { 1047 struct msm_gem_object *msm_obj = to_msm_bo(obj); 1048 1049 vma->vm_flags &= ~VM_PFNMAP; 1050 vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; 1051 vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags)); 1052 1053 return 0; 1054 } 1055 1056 /* convenience method to construct a GEM buffer object, and userspace handle */ 1057 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, 1058 uint32_t size, uint32_t flags, uint32_t *handle, 1059 char *name) 1060 { 1061 struct drm_gem_object *obj; 1062 int ret; 1063 1064 obj = msm_gem_new(dev, size, flags); 1065 1066 if (IS_ERR(obj)) 1067 return PTR_ERR(obj); 1068 1069 if (name) 1070 msm_gem_object_set_name(obj, "%s", name); 1071 1072 ret = drm_gem_handle_create(file, obj, handle); 1073 1074 /* drop reference from allocate - handle holds it now */ 1075 drm_gem_object_put(obj); 1076 1077 return ret; 1078 } 1079 1080 static const struct vm_operations_struct vm_ops = { 1081 .fault = msm_gem_fault, 1082 .open = drm_gem_vm_open, 1083 .close = drm_gem_vm_close, 1084 }; 1085 1086 static const struct drm_gem_object_funcs msm_gem_object_funcs = { 1087 .free = msm_gem_free_object, 1088 .pin = msm_gem_prime_pin, 1089 .unpin = msm_gem_prime_unpin, 1090 .get_sg_table = msm_gem_prime_get_sg_table, 1091 .vmap = msm_gem_prime_vmap, 1092 .vunmap = msm_gem_prime_vunmap, 1093 .mmap = msm_gem_object_mmap, 1094 .vm_ops = &vm_ops, 1095 }; 1096 1097 static int msm_gem_new_impl(struct drm_device *dev, 1098 uint32_t size, uint32_t flags, 1099 struct drm_gem_object **obj) 1100 { 1101 struct msm_drm_private *priv = dev->dev_private; 1102 struct msm_gem_object *msm_obj; 1103 1104 switch (flags & MSM_BO_CACHE_MASK) { 1105 case MSM_BO_UNCACHED: 1106 case MSM_BO_CACHED: 1107 case MSM_BO_WC: 1108 break; 1109 case MSM_BO_CACHED_COHERENT: 1110 if (priv->has_cached_coherent) 1111 break; 1112 fallthrough; 1113 default: 1114 DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", 1115 (flags & MSM_BO_CACHE_MASK)); 1116 return -EINVAL; 1117 } 1118 1119 msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); 1120 if (!msm_obj) 1121 return -ENOMEM; 1122 1123 msm_obj->flags = flags; 1124 msm_obj->madv = MSM_MADV_WILLNEED; 1125 1126 INIT_LIST_HEAD(&msm_obj->vmas); 1127 1128 *obj = &msm_obj->base; 1129 (*obj)->funcs = &msm_gem_object_funcs; 1130 1131 return 0; 1132 } 1133 1134 struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags) 1135 { 1136 struct msm_drm_private *priv = dev->dev_private; 1137 struct msm_gem_object *msm_obj; 1138 struct drm_gem_object *obj = NULL; 1139 bool use_vram = false; 1140 int ret; 1141 1142 size = PAGE_ALIGN(size); 1143 1144 if (!msm_use_mmu(dev)) 1145 use_vram = true; 1146 else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size) 1147 use_vram = true; 1148 1149 if (GEM_WARN_ON(use_vram && !priv->vram.size)) 1150 return ERR_PTR(-EINVAL); 1151 1152 /* Disallow zero sized objects as they make the underlying 1153 * infrastructure grumpy 1154 */ 1155 if (size == 0) 1156 return ERR_PTR(-EINVAL); 1157 1158 ret = msm_gem_new_impl(dev, size, flags, &obj); 1159 if (ret) 1160 goto fail; 1161 1162 msm_obj = to_msm_bo(obj); 1163 1164 if (use_vram) { 1165 struct msm_gem_vma *vma; 1166 struct page **pages; 1167 1168 drm_gem_private_object_init(dev, obj, size); 1169 1170 msm_gem_lock(obj); 1171 1172 vma = add_vma(obj, NULL); 1173 msm_gem_unlock(obj); 1174 if (IS_ERR(vma)) { 1175 ret = PTR_ERR(vma); 1176 goto fail; 1177 } 1178 1179 to_msm_bo(obj)->vram_node = &vma->node; 1180 1181 /* Call chain get_pages() -> update_inactive() tries to 1182 * access msm_obj->mm_list, but it is not initialized yet. 1183 * To avoid NULL pointer dereference error, initialize 1184 * mm_list to be empty. 1185 */ 1186 INIT_LIST_HEAD(&msm_obj->mm_list); 1187 1188 msm_gem_lock(obj); 1189 pages = get_pages(obj); 1190 msm_gem_unlock(obj); 1191 if (IS_ERR(pages)) { 1192 ret = PTR_ERR(pages); 1193 goto fail; 1194 } 1195 1196 vma->iova = physaddr(obj); 1197 } else { 1198 ret = drm_gem_object_init(dev, obj, size); 1199 if (ret) 1200 goto fail; 1201 /* 1202 * Our buffers are kept pinned, so allocating them from the 1203 * MOVABLE zone is a really bad idea, and conflicts with CMA. 1204 * See comments above new_inode() why this is required _and_ 1205 * expected if you're going to pin these pages. 1206 */ 1207 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER); 1208 } 1209 1210 mutex_lock(&priv->mm_lock); 1211 list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned); 1212 mutex_unlock(&priv->mm_lock); 1213 1214 mutex_lock(&priv->obj_lock); 1215 list_add_tail(&msm_obj->node, &priv->objects); 1216 mutex_unlock(&priv->obj_lock); 1217 1218 return obj; 1219 1220 fail: 1221 drm_gem_object_put(obj); 1222 return ERR_PTR(ret); 1223 } 1224 1225 struct drm_gem_object *msm_gem_import(struct drm_device *dev, 1226 struct dma_buf *dmabuf, struct sg_table *sgt) 1227 { 1228 struct msm_drm_private *priv = dev->dev_private; 1229 struct msm_gem_object *msm_obj; 1230 struct drm_gem_object *obj; 1231 uint32_t size; 1232 int ret, npages; 1233 1234 /* if we don't have IOMMU, don't bother pretending we can import: */ 1235 if (!msm_use_mmu(dev)) { 1236 DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n"); 1237 return ERR_PTR(-EINVAL); 1238 } 1239 1240 size = PAGE_ALIGN(dmabuf->size); 1241 1242 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); 1243 if (ret) 1244 goto fail; 1245 1246 drm_gem_private_object_init(dev, obj, size); 1247 1248 npages = size / PAGE_SIZE; 1249 1250 msm_obj = to_msm_bo(obj); 1251 msm_gem_lock(obj); 1252 msm_obj->sgt = sgt; 1253 msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 1254 if (!msm_obj->pages) { 1255 msm_gem_unlock(obj); 1256 ret = -ENOMEM; 1257 goto fail; 1258 } 1259 1260 ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages); 1261 if (ret) { 1262 msm_gem_unlock(obj); 1263 goto fail; 1264 } 1265 1266 msm_gem_unlock(obj); 1267 1268 mutex_lock(&priv->mm_lock); 1269 list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned); 1270 mutex_unlock(&priv->mm_lock); 1271 1272 mutex_lock(&priv->obj_lock); 1273 list_add_tail(&msm_obj->node, &priv->objects); 1274 mutex_unlock(&priv->obj_lock); 1275 1276 return obj; 1277 1278 fail: 1279 drm_gem_object_put(obj); 1280 return ERR_PTR(ret); 1281 } 1282 1283 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, 1284 uint32_t flags, struct msm_gem_address_space *aspace, 1285 struct drm_gem_object **bo, uint64_t *iova) 1286 { 1287 void *vaddr; 1288 struct drm_gem_object *obj = msm_gem_new(dev, size, flags); 1289 int ret; 1290 1291 if (IS_ERR(obj)) 1292 return ERR_CAST(obj); 1293 1294 if (iova) { 1295 ret = msm_gem_get_and_pin_iova(obj, aspace, iova); 1296 if (ret) 1297 goto err; 1298 } 1299 1300 vaddr = msm_gem_get_vaddr(obj); 1301 if (IS_ERR(vaddr)) { 1302 msm_gem_unpin_iova(obj, aspace); 1303 ret = PTR_ERR(vaddr); 1304 goto err; 1305 } 1306 1307 if (bo) 1308 *bo = obj; 1309 1310 return vaddr; 1311 err: 1312 drm_gem_object_put(obj); 1313 1314 return ERR_PTR(ret); 1315 1316 } 1317 1318 void msm_gem_kernel_put(struct drm_gem_object *bo, 1319 struct msm_gem_address_space *aspace) 1320 { 1321 if (IS_ERR_OR_NULL(bo)) 1322 return; 1323 1324 msm_gem_put_vaddr(bo); 1325 msm_gem_unpin_iova(bo, aspace); 1326 drm_gem_object_put(bo); 1327 } 1328 1329 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...) 1330 { 1331 struct msm_gem_object *msm_obj = to_msm_bo(bo); 1332 va_list ap; 1333 1334 if (!fmt) 1335 return; 1336 1337 va_start(ap, fmt); 1338 vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap); 1339 va_end(ap); 1340 } 1341