/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
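
/*
 * Illustrative example (values assumed for illustration, not driver code):
 * vram_node->start is in units of pages, so with a carveout based at
 * priv->vram.paddr = 0x80000000, a node allocated at start = 16, and 4K
 * pages, physaddr() resolves to 0x80000000 + (16 << 12) = 0x80010000.
 */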

/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, undo the mapping (and any cache
		 * maintenance) done in get_pages(), since display
		 * controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
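
/*
 * Illustrative usage sketch (assumed caller, not part of this file): pages
 * are pinned lazily on first use and currently stay pinned until the object
 * is purged or freed:
 *
 *	struct page **p = msm_gem_get_pages(obj);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	... access the page array ...
 *	msm_gem_put_pages(obj);		// no-op until pin counting lands
 */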

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
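
/*
 * Recap of the mapping behaviour above (no new logic):
 *
 *	MSM_BO_WC:	 pgprot_writecombine() on the DRM mapping
 *	MSM_BO_UNCACHED: pgprot_noncached() on the DRM mapping
 *	MSM_BO_CACHED:	 redirected to obj->filp so faults go through the
 *			 object's own shmem address_space
 */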

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* This should only happen if userspace tries to pass a mmap'd
	 * but unfaulted gem bo vaddr into submit ioctl, triggering
	 * a page fault while struct_mutex is already held.  This is
	 * not a valid use-case so just bail.
	 */
	if (priv->struct_mutex_task == current)
		return VM_FAULT_SIGBUS;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
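
/*
 * Illustrative userspace flow (assumed libdrm caller, not part of this
 * file): the fake offset from msm_gem_mmap_offset() is handed to mmap(2)
 * on the drm fd, and the first touch of each page lands in msm_gem_fault():
 *
 *	struct drm_mode_map_dumb map_req = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map_req);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, map_req.offset);
 */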

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

static void
put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
			msm_obj->domain[id].iova = 0;
		}
	}
}

/* should be called under struct_mutex.. although it can be called
 * from atomic context without struct_mutex to acquire an extra
 * iova ref if you know one is already held.
 *
 * That means when I do eventually need to add support for unpinning
 * the refcnt counter needs to be atomic_t.
 */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	// XXX TODO ..
	// NOTE: probably don't need a _locked() version.. we wouldn't
	// normally unmap here, but instead just mark that it could be
	// unmapped (if the iova refcnt drops to zero), but then later
	// if another _get_iova_locked() fails we can start unmapping
	// things that are no longer needed..
}
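
/*
 * Illustrative usage sketch (assumed caller such as the submit or scanout
 * paths; not part of this file):
 *
 *	uint32_t iova;
 *	int ret = msm_gem_get_iova(obj, gpu->id, &iova);
 *	if (ret)
 *		return ret;
 *	... hand iova to the GPU / display controller ...
 *	msm_gem_put_iova(obj, gpu->id);	// currently a no-op, see above
 */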

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	msm_obj->vmap_count++;
	return msm_obj->vaddr;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	mutex_lock(&obj->dev->struct_mutex);
	msm_gem_put_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}
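
/*
 * Illustrative pairing (assumed caller, not part of this file): get/put
 * balance vmap_count; the kernel mapping itself is only torn down later by
 * msm_gem_vunmap() once the count drops to zero:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, cmds, len);
 *	msm_gem_put_vaddr(obj);
 */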

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	return (msm_obj->madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}
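
/*
 * Recap of the madvise/purge flow above (no new logic): userspace marks a
 * disposable buffer MSM_MADV_DONTNEED; under memory pressure the shrinker
 * picks a purgeable object and calls msm_gem_purge(); a later
 * MSM_MADV_WILLNEED then returns false from msm_gem_madvise(), telling
 * userspace the contents are gone and must be regenerated.
 */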

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct fence *fence;
	int i, ret;

	if (!exclusive) {
		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
		 * which makes this a slightly strange place to call it.  OTOH this
		 * is a convenient can-fail point to hook it in.  (And similar to
		 * how etnaviv and nouveau handle this.)
		 */
		ret = reservation_object_reserve_shared(msm_obj->resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
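
/*
 * Rule of thumb implemented above (recap, no new logic): a GPU write
 * (exclusive) must wait on other contexts' fences — all shared fences, or
 * the exclusive fence when none are shared; a GPU read (shared) reserves a
 * shared slot and waits only on another context's exclusive fence.  Fences
 * from our own fctx are skipped since the ring is FIFO.
 */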

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);

	if (op & MSM_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
			return -EBUSY;
	} else {
		int ret;

		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
				true, timeout_to_jiffies(timeout));
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
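
/*
 * Illustrative usage (assumed ioctl-level caller, not part of this file):
 * MSM_PREP_NOSYNC is a poll, otherwise we block until the fences signal or
 * the timeout expires:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads buffer contents ...
 *	msm_gem_cpu_fini(obj);
 */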

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct fence *fence, const char *type,
		struct seq_file *m)
{
	if (!fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;
	bool use_vram = false;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return -EINVAL;

	sz = sizeof(*msm_obj);
	if (use_vram)
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (use_vram)
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}
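
/*
 * Recap of the backing-store decision in msm_gem_new_impl() (no new logic):
 *
 *	no IOMMU present	-> allocate from the VRAM carveout
 *	MSM_BO_STOLEN + vram	-> carveout (e.g. bootloader splash buffer)
 *	otherwise		-> shmem pages, mapped through the IOMMU
 */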

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (ret)
		goto fail;

	if (use_pages(obj)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	drm_gem_object_unreference(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	/* NULL-init so the fail path is safe if msm_gem_new_impl() fails
	 * before *obj is assigned:
	 */
	struct drm_gem_object *obj = NULL;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}
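
/*
 * Illustrative import flow (assumed PRIME core behaviour, not part of this
 * file): drm_gem_prime_import() attaches to the dma-buf and maps it to get
 * the sg_table, then reaches msm_gem_import() above via the driver's
 * .gem_prime_import_sg_table hook; the resulting object shares the
 * exporter's reservation object (dmabuf->resv).
 */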