// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/*
 * get iova and pin it. Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

	if (!atomic_fetch_inc(&msm_obj->active_count)) {
		msm_obj->gpu = gpu;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!atomic_dec_return(&msm_obj->active_count)) {
		msm_obj->gpu = NULL;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	}
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);

	mutex_lock(&dev->struct_mutex);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&dev->struct_mutex);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);

}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}