// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
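
/*
 * Lazily allocate the backing pages (shmem, or the VRAM carveout when
 * there is no IOMMU), build the sg_table, and clean the cache for
 * non-cached objects. Must be called with the object lock held.
 */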
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
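
/*
 * CPU page-fault handler for mmap'd objects: make sure the backing pages
 * are allocated, then insert just the faulting page into the VMA as a
 * mixed/PFN mapping.
 */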
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}
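
/*
 * Map the object's sg_table into the given address space, with IOMMU
 * protection flags derived from the buffer flags. The vma must already
 * exist (see get_iova_locked()) and the object lock must be held.
 */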
static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!msm_gem_is_locked(obj));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * Get the iova and pin it. Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this! It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else false. */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory back to the
	 * system as possible, since we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}
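
/*
 * Move an idle object onto the inactive list matching its current madvise
 * state (willneed vs dontneed), holding the mm_lock.
 */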
static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	mutex_lock(&priv->mm_lock);
	WARN_ON(msm_obj->active_count != 0);

	list_del_init(&msm_obj->mm_list);
	if (msm_obj->madv == MSM_MADV_WILLNEED)
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	else
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly! Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->mm_lock);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	put_iova(obj);

	if (obj->import_attach) {
		WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;

	return 0;
}
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	/* Initially obj is idle, obj->madv == WILLNEED: */
	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	mutex_unlock(&priv->mm_lock);

	return obj;

fail:
	if (struct_mutex_locked) {
		drm_gem_object_put_locked(obj);
	} else {
		drm_gem_object_put(obj);
	}
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}
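
/*
 * Import a dma-buf: the exporter's sg_table is kept as-is and only the
 * page array is rebuilt from it. Requires an IOMMU, since without one the
 * GPU can only use the contiguous carveout.
 */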
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	mutex_unlock(&priv->mm_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}