// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);


static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_map_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}
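
/*
 * Note: the two helpers above are paired by get_pages()/put_pages()
 * below -- sync_for_device() once pages are first attached, and
 * sync_for_cpu() when they are released -- and only for WC/uncached
 * buffers.
 */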

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
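
/*
 * Illustrative sketch only (no such caller in this file): users of the
 * two helpers above are expected to bracket CPU access to the backing
 * pages, e.g.:
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... touch pages[] ...
 *	msm_gem_put_pages(obj);
 *
 * msm_gem_put_pages() stays a no-op until pin counting is implemented.
 */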

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}
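
/*
 * The offset returned above is the "fake" offset from the DRM vma
 * manager, not a physical offset: userspace is expected to pass it
 * back as the mmap() offset on the drm fd (see msm_gem_dumb_map_offset()
 * below for the dumb-buffer path).
 */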

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}
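
/*
 * add_vma() and lookup_vma() must be called with msm_obj->lock held,
 * as asserted above; del_vma() only touches the per-object list, and
 * both of its callers below (put_iova() and msm_gem_get_iova_locked())
 * already hold the lock.
 */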

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/*
 * get iova and pin it. Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}
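
/*
 * Illustrative sketch (hypothetical caller): the expected pairing for
 * the pin/unpin API above is
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... hand iova to the GPU ...
 *	msm_gem_unpin_iova(obj, aspace);
 *
 * whereas msm_gem_get_iova()/msm_gem_iova() take no pin and so need
 * no matching put.
 */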

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
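
/*
 * Illustrative sketch (hypothetical caller): kernel CPU access pairs
 * the two calls above:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, size);
 *	msm_gem_put_vaddr(obj);
 *
 * The mapping itself is kept around, and torn down later (e.g. from
 * the shrinker via msm_gem_vunmap()) once vmap_count drops to zero.
 */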

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		dma_resv_add_excl_fence(obj->resv, fence);
	else
		dma_resv_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}
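
/*
 * The llist+worker indirection above exists because free_object()
 * needs dev->struct_mutex, which the context dropping the final
 * reference may not be able to take; msm_gem_free_object() therefore
 * only queues the object, and the worker performs the real teardown.
 */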

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}
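
/*
 * msm_gem_new_handle() above is the common back end for userspace
 * allocations -- both msm_gem_dumb_create() earlier in this file and
 * (in msm_drv.c) the MSM_GEM_NEW ioctl funnel through it.
 */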

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}
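
/*
 * Illustrative sketch (hypothetical caller): the typical lifetime of a
 * kernel-internal buffer using the helpers above:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr;
 *
 *	ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... use ptr for CPU access, iova for the GPU ...
 *	msm_gem_kernel_put(bo, aspace, false);
 */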

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}
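
/*
 * Illustrative only: callers typically tag buffers for debugfs with a
 * printf-style name, e.g. msm_gem_object_set_name(bo, "ring%d", id);
 * the name shows up in the msm_gem_describe() output above.
 */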