/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			kvfree(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (!priv->aspace[id])
			continue;
		msm_gem_unmap_vma(priv->aspace[id],
				&msm_obj->domain[id], msm_obj->sgt);
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id = aspace ? aspace->id : 0;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
				msm_obj->sgt, obj->size >> PAGE_SHIFT);
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id = aspace ? aspace->id : 0;
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		might_lock(&obj->dev->struct_mutex);
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id = aspace ? aspace->id : 0;
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct dma_fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;
	unsigned id;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	for (id = 0; id < priv->num_aspaces; id++)
		seq_printf(m, " %08llx", msm_obj->domain[id].iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_object *msm_obj = to_msm_bo(obj);
		struct page **pages;

		msm_obj->vram_node = &msm_obj->domain[0].node;
		drm_gem_private_object_init(dev, obj, size);

		msm_obj->pages = get_pages(obj);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->domain[0].iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	/* Take mutex so we can modify the inactive list in msm_gem_new_impl */
	mutex_lock(&dev->struct_mutex);
	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}