// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void update_lru_active(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_obj->pages);

	if (msm_obj->pin_count) {
		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
	} else {
		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);

		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
	}
}

static void update_lru_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(&msm_obj->base);

	if (!msm_obj->pages) {
		GEM_WARN_ON(msm_obj->pin_count);

		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
	} else {
		update_lru_active(obj);
	}
}

static void update_lru(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;

	mutex_lock(&priv->lru.lock);
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);
}
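
/*
 * Note (descriptive summary of the LRU handling above): every object lives
 * on exactly one of four LRU lists.  Objects without backing pages sit on
 * lru.unbacked; objects with pages sit on lru.pinned while pin_count is
 * non-zero, on lru.willneed when unpinned but marked MSM_MADV_WILLNEED,
 * and on lru.dontneed once userspace has marked them MSM_MADV_DONTNEED
 * (i.e. purgeable by the shrinker).
 */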

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & MSM_BO_WC)
			sync_for_device(msm_obj);

		update_lru(obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & MSM_BO_WC)
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
		update_lru(obj);
	}
}

static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
					      unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			      msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	return get_pages(obj);
}

/*
 * Update the pin count of the object, call under lru.lock
 */
void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;

	msm_gem_assert_locked(obj);

	to_msm_bo(obj)->pin_count++;
	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
}

static void pin_obj_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;

	mutex_lock(&priv->lru.lock);
	msm_gem_pin_obj_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
	struct page **p;

	msm_gem_lock(obj);
	p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
	if (!IS_ERR(p))
		pin_obj_locked(obj);
	msm_gem_unlock(obj);

	return p;
}

void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_unpin_locked(obj);
	msm_gem_unlock(obj);
}
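
/*
 * Illustrative only (not part of the driver): msm_gem_pin_pages() and
 * msm_gem_unpin_pages() are expected to be used as a matched pair by a
 * caller that needs the backing pages to stay resident.  "obj" below is a
 * placeholder for any msm GEM object:
 *
 *	struct page **pages = msm_gem_pin_pages(obj);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... access the backing pages ...
 *	msm_gem_unpin_pages(obj);
 */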

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & MSM_BO_WC)
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_pfn(vma, vmf->address, pfn);

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	msm_gem_assert_locked(obj);

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = msm_gem_vma_new(aspace);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_vma_purge(vma);
			if (close)
				msm_gem_vma_close(vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	msm_gem_assert_locked(obj);

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	msm_gem_assert_locked(obj);

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_vma_init(vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	msm_gem_assert_locked(obj);

	pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
}

void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	mutex_lock(&priv->lru.lock);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru_locked(obj);
	mutex_unlock(&priv->lru.lock);
}

/* Special unpin path for use in fence-signaling path, avoiding the need
 * to hold the obj lock by only depending on things that are protected by
 * the LRU lock. In particular we know that we already have backing and
 * that the object's dma_resv has the fence for the current submit/job
 * which will prevent us racing against page eviction.
 */
void msm_gem_unpin_active(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru_active(obj);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					   struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	msm_gem_assert_locked(obj);

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret) {
		*iova = vma->iova;
		pin_obj_locked(obj);
	}

	return ret;
}

/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		      struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_vma_purge(vma);
	msm_gem_vma_close(vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it. Fails if the requested iova is
 * not available. Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		     struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;
		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}
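
/*
 * Illustrative only (not part of the driver): a typical caller pins a
 * buffer into an address space to obtain a device-visible iova, references
 * it from submitted work, and later drops the pin with the matching unpin
 * below.  "obj" and "aspace" are placeholders:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *
 *	if (ret)
 *		return ret;
 *	... hand iova to the GPU / display ...
 *	msm_gem_unpin_iova(obj, aspace);
 */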

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_vma_unpin(vma);
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret = 0;

	msm_gem_assert_locked(obj);

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	pages = msm_gem_pin_pages_locked(obj, madv);
	if (IS_ERR(pages))
		return ERR_CAST(pages);

	pin_obj_locked(obj);

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}
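
/*
 * Illustrative only (not part of the driver): CPU access through the kernel
 * vmap uses a matched get/put pair.  "obj", "data" and "size" below are
 * placeholders:
 *
 *	void *vaddr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, size);
 *	msm_gem_put_vaddr(obj);
 */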

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	mutex_lock(&priv->lru.lock);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	update_lru_locked(obj);

	mutex_unlock(&priv->lru.lock);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}
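
/*
 * Illustrative only (not part of the driver): the madvise ioctl path marks
 * a buffer purgeable or needed again and reports to userspace whether the
 * backing store was retained.  "obj" and "retained" are placeholders:
 *
 *	retained = msm_gem_madvise(obj, MSM_MADV_DONTNEED);
 *	...
 *	retained = msm_gem_madvise(obj, MSM_MADV_WILLNEED);
 *	if (!retained)
 *		... contents were purged, caller must re-create them ...
 */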

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	mutex_lock(&priv->lru.lock);
	/* A one-way transition: */
	msm_obj->madv = __MSM_MADV_PURGED;
	mutex_unlock(&priv->lru.lock);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);
	GEM_WARN_ON(is_unevictable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_assert_locked(obj);

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

bool msm_gem_active(struct drm_gem_object *obj)
{
	msm_gem_assert_locked(obj);

	if (to_msm_bo(obj)->pin_count)
		return true;

	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	if (op & MSM_PREP_BOOST) {
		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
				      ktime_get());
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
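
/*
 * Illustrative only (not part of the driver): userspace brackets CPU access
 * with the CPU_PREP/CPU_FINI ioctls, which end up in the two functions
 * above.  An equivalent in-kernel sequence would look roughly like this
 * ("obj" and "timeout" are placeholders):
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads/writes the buffer ...
 *	msm_gem_cpu_fini(obj);
 */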

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (msm_gem_active(obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				msm_gem_vma_inuse(vma));
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);
	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, " ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total: %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active: %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly! Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	enum drm_gem_object_status status = 0;

	if (msm_obj->pages)
		status |= DRM_GEM_OBJECT_RESIDENT;

	if (msm_obj->madv == MSM_MADV_DONTNEED)
		status |= DRM_GEM_OBJECT_PURGEABLE;

	return status;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.status = msm_gem_status,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	drm_gem_lru_move_tail(&priv->lru.pinned, obj);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}
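
/*
 * Illustrative only (not part of the driver): the msm_gem_kernel_new() /
 * msm_gem_kernel_put() helpers above bundle allocate + pin-iova + vmap for
 * kernel-internal buffers (ringbuffers and the like).  "gpu" below is a
 * placeholder for a structure providing a drm device and address space:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr;
 *
 *	vaddr = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC,
 *			gpu->aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... use vaddr for CPU access, iova for the GPU ...
 *	msm_gem_kernel_put(bo, gpu->aspace);
 */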