/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		/* Set ->pages before creating the sgt, so a failure below
		 * still lets put_pages() release them instead of leaking:
		 */
		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			dev_err(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}
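/* Illustrative sketch (not code from this driver): the first caller to
 * pin a WC or uncached object pays for the cache clean via the
 * dma_map_sg() above.  Assuming 'obj' was allocated with MSM_BO_WC:
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	// pages are now allocated, mapped for DMA and cleaned out of the
 *	// CPU caches, so the (non-coherent) GPU/display can safely read
 */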
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the pages are clean
			 * before release, because display controller, GPU,
			 * etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
						msm_obj->sgt->nents,
						DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
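/* For context, a sketch of how userspace reaches the fault handler above
 * (illustrative, not code from this file): it mmap()s the DRM fd at the
 * object's fake offset and then touches a page:
 *
 *	// offset obtained from e.g. DRM_IOCTL_MODE_MAP_DUMB
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, drm_fd, offset);
 *	((char *)ptr)[0] = 0;	// first touch faults the page in lazily
 */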
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
			msm_obj->domain[id].iova = 0;
		}
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
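/* Usage sketch (illustrative; 'gpu' is an assumed caller-side variable):
 * submit paths pin a buffer and record its GPU address before emitting
 * commands that reference it:
 *
 *	uint32_t iova;
 *	int ret = msm_gem_get_iova(obj, gpu->id, &iova);
 *	if (ret)
 *		return ret;
 *	// 'iova' remains valid until the object is freed, since there is
 *	// no unpin yet (see msm_gem_put_iova() below)
 */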
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}
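/* Usage sketch (illustrative): get/put must balance so that vmap_count
 * stays accurate and msm_gem_vunmap() only tears down unused mappings:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */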
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}
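/* Lifecycle sketch (illustrative): userspace marks an idle buffer
 * purgeable and the shrinker may later reclaim it:
 *
 *	msm_gem_madvise(obj, MSM_MADV_DONTNEED);   // shrinker candidate now
 *	...
 *	if (is_purgeable(msm_obj))
 *		msm_gem_purge(obj);                // backing pages dropped
 *	...
 *	if (msm_gem_madvise(obj, MSM_MADV_WILLNEED) == 0)
 *		;	// contents were purged; caller must reinitialize
 */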
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
				reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);

	if (op & MSM_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
			return -EBUSY;
	} else {
		int ret;

		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
				true, timeout_to_jiffies(timeout));
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
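/* Usage sketch (illustrative): the CPU-access ioctl paths map onto the
 * pair above; a blocking prep waits out pending GPU work on the
 * reservation object:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;	// -EBUSY only with MSM_PREP_NOSYNC
 *	// ... CPU reads/writes the buffer ...
 *	msm_gem_cpu_fini(obj);
 */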
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
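/* Sketch (illustrative): the dumb-buffer path above and the
 * driver-specific allocation ioctl both reduce to this helper; on
 * success the new handle owns the only reference:
 *
 *	uint32_t handle;
 *	int ret = msm_gem_new_handle(dev, file, size, MSM_BO_WC, &handle);
 *	// the object is now reachable only via 'handle'
 */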
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		/* the drm_mm_node was allocated inline, after msm_obj: */
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}
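/* Import sketch (illustrative): for a foreign dma-buf, the DRM PRIME
 * core maps the exporter's attachment and then calls into the driver,
 * which lands here, roughly:
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	obj = msm_gem_import(dev, attach->dmabuf, sgt);
 *	// imported pages stay owned by the exporter; msm_gem_free_object()
 *	// frees only the page array and lets PRIME clean up the attachment
 */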