1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 2c8afe684SRob Clark /* 3c8afe684SRob Clark * Copyright (C) 2013 Red Hat 4c8afe684SRob Clark * Author: Rob Clark <robdclark@gmail.com> 5c8afe684SRob Clark */ 6c8afe684SRob Clark 70a0f0d8bSChristoph Hellwig #include <linux/dma-map-ops.h> 8c8afe684SRob Clark #include <linux/spinlock.h> 9c8afe684SRob Clark #include <linux/shmem_fs.h> 1005b84911SRob Clark #include <linux/dma-buf.h> 1101c8f1c4SDan Williams #include <linux/pfn_t.h> 12c8afe684SRob Clark 13feea39a8SSam Ravnborg #include <drm/drm_prime.h> 14feea39a8SSam Ravnborg 15c8afe684SRob Clark #include "msm_drv.h" 16fde5de6cSRob Clark #include "msm_fence.h" 17c8afe684SRob Clark #include "msm_gem.h" 187198e6b0SRob Clark #include "msm_gpu.h" 19871d812aSRob Clark #include "msm_mmu.h" 20c8afe684SRob Clark 213edfa30fSRob Clark static void update_inactive(struct msm_gem_object *msm_obj); 220e08270aSSushmita Susheelendra 23871d812aSRob Clark static dma_addr_t physaddr(struct drm_gem_object *obj) 24871d812aSRob Clark { 25871d812aSRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 26871d812aSRob Clark struct msm_drm_private *priv = obj->dev->dev_private; 27871d812aSRob Clark return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) + 28871d812aSRob Clark priv->vram.paddr; 29871d812aSRob Clark } 30871d812aSRob Clark 31072f1f91SRob Clark static bool use_pages(struct drm_gem_object *obj) 32072f1f91SRob Clark { 33072f1f91SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 34072f1f91SRob Clark return !msm_obj->vram_node; 35072f1f91SRob Clark } 36072f1f91SRob Clark 373de433c5SRob Clark /* 383de433c5SRob Clark * Cache sync.. this is a bit over-complicated, to fit dma-mapping 393de433c5SRob Clark * API. Really GPU cache is out of scope here (handled on cmdstream) 403de433c5SRob Clark * and all we need to do is invalidate newly allocated pages before 413de433c5SRob Clark * mapping to CPU as uncached/writecombine. 423de433c5SRob Clark * 433de433c5SRob Clark * On top of this, we have the added headache, that depending on 443de433c5SRob Clark * display generation, the display's iommu may be wired up to either 453de433c5SRob Clark * the toplevel drm device (mdss), or to the mdp sub-node, meaning 463de433c5SRob Clark * that here we either have dma-direct or iommu ops. 473de433c5SRob Clark * 483de433c5SRob Clark * Let this be a cautionary tail of abstraction gone wrong. 493de433c5SRob Clark */ 503de433c5SRob Clark 513de433c5SRob Clark static void sync_for_device(struct msm_gem_object *msm_obj) 523de433c5SRob Clark { 533de433c5SRob Clark struct device *dev = msm_obj->base.dev->dev; 543de433c5SRob Clark 557690a33fSMarek Szyprowski dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); 563de433c5SRob Clark } 573de433c5SRob Clark 583de433c5SRob Clark static void sync_for_cpu(struct msm_gem_object *msm_obj) 593de433c5SRob Clark { 603de433c5SRob Clark struct device *dev = msm_obj->base.dev->dev; 613de433c5SRob Clark 627690a33fSMarek Szyprowski dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); 633de433c5SRob Clark } 643de433c5SRob Clark 65871d812aSRob Clark /* allocate pages from VRAM carveout, used when no IOMMU: */ 660e08270aSSushmita Susheelendra static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) 67871d812aSRob Clark { 68871d812aSRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 69871d812aSRob Clark struct msm_drm_private *priv = obj->dev->dev_private; 70871d812aSRob Clark dma_addr_t paddr; 71871d812aSRob Clark struct page **p; 72871d812aSRob Clark int ret, i; 73871d812aSRob Clark 742098105eSMichal Hocko p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 75871d812aSRob Clark if (!p) 76871d812aSRob Clark return ERR_PTR(-ENOMEM); 77871d812aSRob Clark 780e08270aSSushmita Susheelendra spin_lock(&priv->vram.lock); 794e64e553SChris Wilson ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages); 800e08270aSSushmita Susheelendra spin_unlock(&priv->vram.lock); 81871d812aSRob Clark if (ret) { 822098105eSMichal Hocko kvfree(p); 83871d812aSRob Clark return ERR_PTR(ret); 84871d812aSRob Clark } 85871d812aSRob Clark 86871d812aSRob Clark paddr = physaddr(obj); 87871d812aSRob Clark for (i = 0; i < npages; i++) { 88871d812aSRob Clark p[i] = phys_to_page(paddr); 89871d812aSRob Clark paddr += PAGE_SIZE; 90871d812aSRob Clark } 91871d812aSRob Clark 92871d812aSRob Clark return p; 93871d812aSRob Clark } 94c8afe684SRob Clark 95c8afe684SRob Clark static struct page **get_pages(struct drm_gem_object *obj) 96c8afe684SRob Clark { 97c8afe684SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 98c8afe684SRob Clark 9990643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 10007fcad0dSIskren Chernev 101c8afe684SRob Clark if (!msm_obj->pages) { 102c8afe684SRob Clark struct drm_device *dev = obj->dev; 103871d812aSRob Clark struct page **p; 104c8afe684SRob Clark int npages = obj->size >> PAGE_SHIFT; 105c8afe684SRob Clark 106072f1f91SRob Clark if (use_pages(obj)) 1070cdbe8acSDavid Herrmann p = drm_gem_get_pages(obj); 108871d812aSRob Clark else 109871d812aSRob Clark p = get_pages_vram(obj, npages); 110871d812aSRob Clark 111c8afe684SRob Clark if (IS_ERR(p)) { 1126a41da17SMamta Shukla DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n", 113c8afe684SRob Clark PTR_ERR(p)); 114c8afe684SRob Clark return p; 115c8afe684SRob Clark } 116c8afe684SRob Clark 11762e3a3e3SPrakash Kamliya msm_obj->pages = p; 11862e3a3e3SPrakash Kamliya 119707d561fSGerd Hoffmann msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages); 1201f70e079SWei Yongjun if (IS_ERR(msm_obj->sgt)) { 12162e3a3e3SPrakash Kamliya void *ptr = ERR_CAST(msm_obj->sgt); 122c8afe684SRob Clark 1236a41da17SMamta Shukla DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n"); 12462e3a3e3SPrakash Kamliya msm_obj->sgt = NULL; 12562e3a3e3SPrakash Kamliya return ptr; 12662e3a3e3SPrakash Kamliya } 127c8afe684SRob Clark 128c8afe684SRob Clark /* For non-cached buffers, ensure the new pages are clean 129c8afe684SRob Clark * because display controller, GPU, etc. are not coherent: 130c8afe684SRob Clark */ 131c8afe684SRob Clark if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 1323de433c5SRob Clark sync_for_device(msm_obj); 13364fcbde7SRob Clark 13464fcbde7SRob Clark GEM_WARN_ON(msm_obj->active_count); 13564fcbde7SRob Clark update_inactive(msm_obj); 136c8afe684SRob Clark } 137c8afe684SRob Clark 138c8afe684SRob Clark return msm_obj->pages; 139c8afe684SRob Clark } 140c8afe684SRob Clark 1410e08270aSSushmita Susheelendra static void put_pages_vram(struct drm_gem_object *obj) 1420e08270aSSushmita Susheelendra { 1430e08270aSSushmita Susheelendra struct msm_gem_object *msm_obj = to_msm_bo(obj); 1440e08270aSSushmita Susheelendra struct msm_drm_private *priv = obj->dev->dev_private; 1450e08270aSSushmita Susheelendra 1460e08270aSSushmita Susheelendra spin_lock(&priv->vram.lock); 1470e08270aSSushmita Susheelendra drm_mm_remove_node(msm_obj->vram_node); 1480e08270aSSushmita Susheelendra spin_unlock(&priv->vram.lock); 1490e08270aSSushmita Susheelendra 1500e08270aSSushmita Susheelendra kvfree(msm_obj->pages); 1510e08270aSSushmita Susheelendra } 1520e08270aSSushmita Susheelendra 153c8afe684SRob Clark static void put_pages(struct drm_gem_object *obj) 154c8afe684SRob Clark { 155c8afe684SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 156c8afe684SRob Clark 157c8afe684SRob Clark if (msm_obj->pages) { 1583976626eSBen Hutchings if (msm_obj->sgt) { 1593976626eSBen Hutchings /* For non-cached buffers, ensure the new 1603976626eSBen Hutchings * pages are clean because display controller, 1613976626eSBen Hutchings * GPU, etc. are not coherent: 162c8afe684SRob Clark */ 163c8afe684SRob Clark if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 1643de433c5SRob Clark sync_for_cpu(msm_obj); 16562e3a3e3SPrakash Kamliya 166c8afe684SRob Clark sg_free_table(msm_obj->sgt); 167c8afe684SRob Clark kfree(msm_obj->sgt); 168b9a31d0dSRob Clark msm_obj->sgt = NULL; 1693976626eSBen Hutchings } 170c8afe684SRob Clark 171072f1f91SRob Clark if (use_pages(obj)) 172c8afe684SRob Clark drm_gem_put_pages(obj, msm_obj->pages, true, false); 1730e08270aSSushmita Susheelendra else 1740e08270aSSushmita Susheelendra put_pages_vram(obj); 175871d812aSRob Clark 176c8afe684SRob Clark msm_obj->pages = NULL; 177c8afe684SRob Clark } 178c8afe684SRob Clark } 179c8afe684SRob Clark 18005b84911SRob Clark struct page **msm_gem_get_pages(struct drm_gem_object *obj) 18105b84911SRob Clark { 1820e08270aSSushmita Susheelendra struct msm_gem_object *msm_obj = to_msm_bo(obj); 18305b84911SRob Clark struct page **p; 1840e08270aSSushmita Susheelendra 185a6ae74c9SRob Clark msm_gem_lock(obj); 1860e08270aSSushmita Susheelendra 18790643a24SRob Clark if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { 188a6ae74c9SRob Clark msm_gem_unlock(obj); 1890e08270aSSushmita Susheelendra return ERR_PTR(-EBUSY); 1900e08270aSSushmita Susheelendra } 1910e08270aSSushmita Susheelendra 19205b84911SRob Clark p = get_pages(obj); 19310f76165SRob Clark 19410f76165SRob Clark if (!IS_ERR(p)) { 19510f76165SRob Clark msm_obj->pin_count++; 19610f76165SRob Clark update_inactive(msm_obj); 19710f76165SRob Clark } 19810f76165SRob Clark 199a6ae74c9SRob Clark msm_gem_unlock(obj); 20005b84911SRob Clark return p; 20105b84911SRob Clark } 20205b84911SRob Clark 20305b84911SRob Clark void msm_gem_put_pages(struct drm_gem_object *obj) 20405b84911SRob Clark { 20510f76165SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 20610f76165SRob Clark 20710f76165SRob Clark msm_gem_lock(obj); 20810f76165SRob Clark msm_obj->pin_count--; 20910f76165SRob Clark GEM_WARN_ON(msm_obj->pin_count < 0); 21010f76165SRob Clark update_inactive(msm_obj); 21110f76165SRob Clark msm_gem_unlock(obj); 21205b84911SRob Clark } 21305b84911SRob Clark 214c8afe684SRob Clark int msm_gem_mmap_obj(struct drm_gem_object *obj, 215c8afe684SRob Clark struct vm_area_struct *vma) 216c8afe684SRob Clark { 217c8afe684SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 218c8afe684SRob Clark 219c8afe684SRob Clark vma->vm_flags &= ~VM_PFNMAP; 220c8afe684SRob Clark vma->vm_flags |= VM_MIXEDMAP; 221c8afe684SRob Clark 222c8afe684SRob Clark if (msm_obj->flags & MSM_BO_WC) { 223c8afe684SRob Clark vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 224c8afe684SRob Clark } else if (msm_obj->flags & MSM_BO_UNCACHED) { 225c8afe684SRob Clark vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags)); 226c8afe684SRob Clark } else { 227c8afe684SRob Clark /* 228c8afe684SRob Clark * Shunt off cached objs to shmem file so they have their own 229c8afe684SRob Clark * address_space (so unmap_mapping_range does what we want, 230c8afe684SRob Clark * in particular in the case of mmap'd dmabufs) 231c8afe684SRob Clark */ 232c8afe684SRob Clark vma->vm_pgoff = 0; 233295992fbSChristian König vma_set_file(vma, obj->filp); 234c8afe684SRob Clark 235c8afe684SRob Clark vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 236c8afe684SRob Clark } 237c8afe684SRob Clark 238c8afe684SRob Clark return 0; 239c8afe684SRob Clark } 240c8afe684SRob Clark 241c8afe684SRob Clark int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 242c8afe684SRob Clark { 243c8afe684SRob Clark int ret; 244c8afe684SRob Clark 245c8afe684SRob Clark ret = drm_gem_mmap(filp, vma); 246c8afe684SRob Clark if (ret) { 247c8afe684SRob Clark DBG("mmap failed: %d", ret); 248c8afe684SRob Clark return ret; 249c8afe684SRob Clark } 250c8afe684SRob Clark 251c8afe684SRob Clark return msm_gem_mmap_obj(vma->vm_private_data, vma); 252c8afe684SRob Clark } 253c8afe684SRob Clark 2543c9edd9cSThomas Zimmermann static vm_fault_t msm_gem_fault(struct vm_fault *vmf) 255c8afe684SRob Clark { 25611bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 257c8afe684SRob Clark struct drm_gem_object *obj = vma->vm_private_data; 2580e08270aSSushmita Susheelendra struct msm_gem_object *msm_obj = to_msm_bo(obj); 259c8afe684SRob Clark struct page **pages; 260c8afe684SRob Clark unsigned long pfn; 261c8afe684SRob Clark pgoff_t pgoff; 262a5f74ec7SSouptick Joarder int err; 263a5f74ec7SSouptick Joarder vm_fault_t ret; 264c8afe684SRob Clark 2650e08270aSSushmita Susheelendra /* 2660e08270aSSushmita Susheelendra * vm_ops.open/drm_gem_mmap_obj and close get and put 2670e08270aSSushmita Susheelendra * a reference on obj. So, we dont need to hold one here. 268d78d383aSRob Clark */ 269a6ae74c9SRob Clark err = msm_gem_lock_interruptible(obj); 270a5f74ec7SSouptick Joarder if (err) { 271a5f74ec7SSouptick Joarder ret = VM_FAULT_NOPAGE; 272c8afe684SRob Clark goto out; 273a5f74ec7SSouptick Joarder } 274c8afe684SRob Clark 27590643a24SRob Clark if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { 276a6ae74c9SRob Clark msm_gem_unlock(obj); 2770e08270aSSushmita Susheelendra return VM_FAULT_SIGBUS; 2780e08270aSSushmita Susheelendra } 2790e08270aSSushmita Susheelendra 280c8afe684SRob Clark /* make sure we have pages attached now */ 281c8afe684SRob Clark pages = get_pages(obj); 282c8afe684SRob Clark if (IS_ERR(pages)) { 283a5f74ec7SSouptick Joarder ret = vmf_error(PTR_ERR(pages)); 284c8afe684SRob Clark goto out_unlock; 285c8afe684SRob Clark } 286c8afe684SRob Clark 287c8afe684SRob Clark /* We don't use vmf->pgoff since that has the fake offset: */ 2881a29d85eSJan Kara pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; 289c8afe684SRob Clark 290871d812aSRob Clark pfn = page_to_pfn(pages[pgoff]); 291c8afe684SRob Clark 2921a29d85eSJan Kara VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, 293c8afe684SRob Clark pfn, pfn << PAGE_SHIFT); 294c8afe684SRob Clark 295a5f74ec7SSouptick Joarder ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); 296c8afe684SRob Clark out_unlock: 297a6ae74c9SRob Clark msm_gem_unlock(obj); 298c8afe684SRob Clark out: 299a5f74ec7SSouptick Joarder return ret; 300c8afe684SRob Clark } 301c8afe684SRob Clark 302c8afe684SRob Clark /** get mmap offset */ 303c8afe684SRob Clark static uint64_t mmap_offset(struct drm_gem_object *obj) 304c8afe684SRob Clark { 305c8afe684SRob Clark struct drm_device *dev = obj->dev; 306c8afe684SRob Clark int ret; 307c8afe684SRob Clark 30890643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 309c8afe684SRob Clark 310c8afe684SRob Clark /* Make it mmapable */ 311c8afe684SRob Clark ret = drm_gem_create_mmap_offset(obj); 312c8afe684SRob Clark 313c8afe684SRob Clark if (ret) { 3146a41da17SMamta Shukla DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n"); 315c8afe684SRob Clark return 0; 316c8afe684SRob Clark } 317c8afe684SRob Clark 318c8afe684SRob Clark return drm_vma_node_offset_addr(&obj->vma_node); 319c8afe684SRob Clark } 320c8afe684SRob Clark 321c8afe684SRob Clark uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) 322c8afe684SRob Clark { 323c8afe684SRob Clark uint64_t offset; 3240e08270aSSushmita Susheelendra 325a6ae74c9SRob Clark msm_gem_lock(obj); 326c8afe684SRob Clark offset = mmap_offset(obj); 327a6ae74c9SRob Clark msm_gem_unlock(obj); 328c8afe684SRob Clark return offset; 329c8afe684SRob Clark } 330c8afe684SRob Clark 3314b85f7f5SRob Clark static struct msm_gem_vma *add_vma(struct drm_gem_object *obj, 3324b85f7f5SRob Clark struct msm_gem_address_space *aspace) 3334b85f7f5SRob Clark { 3344b85f7f5SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 3354b85f7f5SRob Clark struct msm_gem_vma *vma; 3364b85f7f5SRob Clark 33790643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 3380e08270aSSushmita Susheelendra 3394b85f7f5SRob Clark vma = kzalloc(sizeof(*vma), GFP_KERNEL); 3404b85f7f5SRob Clark if (!vma) 3414b85f7f5SRob Clark return ERR_PTR(-ENOMEM); 3424b85f7f5SRob Clark 3434b85f7f5SRob Clark vma->aspace = aspace; 3444b85f7f5SRob Clark 3454b85f7f5SRob Clark list_add_tail(&vma->list, &msm_obj->vmas); 3464b85f7f5SRob Clark 3474b85f7f5SRob Clark return vma; 3484b85f7f5SRob Clark } 3494b85f7f5SRob Clark 3504b85f7f5SRob Clark static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, 3514b85f7f5SRob Clark struct msm_gem_address_space *aspace) 3524b85f7f5SRob Clark { 3534b85f7f5SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 3544b85f7f5SRob Clark struct msm_gem_vma *vma; 3554b85f7f5SRob Clark 35690643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 3574b85f7f5SRob Clark 3584b85f7f5SRob Clark list_for_each_entry(vma, &msm_obj->vmas, list) { 3594b85f7f5SRob Clark if (vma->aspace == aspace) 3604b85f7f5SRob Clark return vma; 3614b85f7f5SRob Clark } 3624b85f7f5SRob Clark 3634b85f7f5SRob Clark return NULL; 3644b85f7f5SRob Clark } 3654b85f7f5SRob Clark 3664b85f7f5SRob Clark static void del_vma(struct msm_gem_vma *vma) 3674b85f7f5SRob Clark { 3684b85f7f5SRob Clark if (!vma) 3694b85f7f5SRob Clark return; 3704b85f7f5SRob Clark 3714b85f7f5SRob Clark list_del(&vma->list); 3724b85f7f5SRob Clark kfree(vma); 3734b85f7f5SRob Clark } 3744b85f7f5SRob Clark 37520d0ae2fSRob Clark /** 37620d0ae2fSRob Clark * If close is true, this also closes the VMA (releasing the allocated 37720d0ae2fSRob Clark * iova range) in addition to removing the iommu mapping. In the eviction 37820d0ae2fSRob Clark * case (!close), we keep the iova allocated, but only remove the iommu 37920d0ae2fSRob Clark * mapping. 38020d0ae2fSRob Clark */ 3814fe5f65eSRob Clark static void 38220d0ae2fSRob Clark put_iova_spaces(struct drm_gem_object *obj, bool close) 3834fe5f65eSRob Clark { 3844fe5f65eSRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 3859b73bde3SIskren Chernev struct msm_gem_vma *vma; 3864fe5f65eSRob Clark 38790643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 3884fe5f65eSRob Clark 3899b73bde3SIskren Chernev list_for_each_entry(vma, &msm_obj->vmas, list) { 390d67f1b6dSBrian Masney if (vma->aspace) { 3917ad0e8cfSJordan Crouse msm_gem_purge_vma(vma->aspace, vma); 39220d0ae2fSRob Clark if (close) 3937ad0e8cfSJordan Crouse msm_gem_close_vma(vma->aspace, vma); 394d67f1b6dSBrian Masney } 3959b73bde3SIskren Chernev } 3969b73bde3SIskren Chernev } 3979b73bde3SIskren Chernev 3989b73bde3SIskren Chernev /* Called with msm_obj locked */ 3999b73bde3SIskren Chernev static void 4009b73bde3SIskren Chernev put_iova_vmas(struct drm_gem_object *obj) 4014fe5f65eSRob Clark { 4024fe5f65eSRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 4034fe5f65eSRob Clark struct msm_gem_vma *vma, *tmp; 4044fe5f65eSRob Clark 40590643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 4064fe5f65eSRob Clark 4074fe5f65eSRob Clark list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { 4084b85f7f5SRob Clark del_vma(vma); 4094fe5f65eSRob Clark } 4104fe5f65eSRob Clark } 4114fe5f65eSRob Clark 4128117e5e5SRob Clark static int get_iova_locked(struct drm_gem_object *obj, 413d3b8877eSJonathan Marek struct msm_gem_address_space *aspace, uint64_t *iova, 414d3b8877eSJonathan Marek u64 range_start, u64 range_end) 415c8afe684SRob Clark { 4164b85f7f5SRob Clark struct msm_gem_vma *vma; 417c8afe684SRob Clark int ret = 0; 418c8afe684SRob Clark 41990643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 420cb1e3818SRob Clark 4214b85f7f5SRob Clark vma = lookup_vma(obj, aspace); 422871d812aSRob Clark 4234b85f7f5SRob Clark if (!vma) { 4244b85f7f5SRob Clark vma = add_vma(obj, aspace); 425c0ee9794SJordan Crouse if (IS_ERR(vma)) 426c0ee9794SJordan Crouse return PTR_ERR(vma); 4274b85f7f5SRob Clark 428d3b8877eSJonathan Marek ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT, 429d3b8877eSJonathan Marek range_start, range_end); 430c0ee9794SJordan Crouse if (ret) { 431c0ee9794SJordan Crouse del_vma(vma); 432c0ee9794SJordan Crouse return ret; 433c8afe684SRob Clark } 4344b85f7f5SRob Clark } 4354b85f7f5SRob Clark 4364b85f7f5SRob Clark *iova = vma->iova; 4374b85f7f5SRob Clark return 0; 438c0ee9794SJordan Crouse } 4394b85f7f5SRob Clark 440c0ee9794SJordan Crouse static int msm_gem_pin_iova(struct drm_gem_object *obj, 441c0ee9794SJordan Crouse struct msm_gem_address_space *aspace) 442c0ee9794SJordan Crouse { 443c0ee9794SJordan Crouse struct msm_gem_object *msm_obj = to_msm_bo(obj); 444c0ee9794SJordan Crouse struct msm_gem_vma *vma; 445c0ee9794SJordan Crouse struct page **pages; 44664fcbde7SRob Clark int ret, prot = IOMMU_READ; 447bbc2cd07SRob Clark 448bbc2cd07SRob Clark if (!(msm_obj->flags & MSM_BO_GPU_READONLY)) 449bbc2cd07SRob Clark prot |= IOMMU_WRITE; 450c0ee9794SJordan Crouse 4510b462d7aSJonathan Marek if (msm_obj->flags & MSM_BO_MAP_PRIV) 4520b462d7aSJonathan Marek prot |= IOMMU_PRIV; 4530b462d7aSJonathan Marek 45490643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 455c0ee9794SJordan Crouse 45690643a24SRob Clark if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) 457c0ee9794SJordan Crouse return -EBUSY; 458c0ee9794SJordan Crouse 459c0ee9794SJordan Crouse vma = lookup_vma(obj, aspace); 46090643a24SRob Clark if (GEM_WARN_ON(!vma)) 461c0ee9794SJordan Crouse return -EINVAL; 462c0ee9794SJordan Crouse 463c0ee9794SJordan Crouse pages = get_pages(obj); 464c0ee9794SJordan Crouse if (IS_ERR(pages)) 465c0ee9794SJordan Crouse return PTR_ERR(pages); 466c0ee9794SJordan Crouse 46764fcbde7SRob Clark ret = msm_gem_map_vma(aspace, vma, prot, 468bbc2cd07SRob Clark msm_obj->sgt, obj->size >> PAGE_SHIFT); 46964fcbde7SRob Clark 47064fcbde7SRob Clark if (!ret) 47164fcbde7SRob Clark msm_obj->pin_count++; 47264fcbde7SRob Clark 47364fcbde7SRob Clark return ret; 474c0ee9794SJordan Crouse } 475c0ee9794SJordan Crouse 476e4b87d22SRob Clark static int get_and_pin_iova_range_locked(struct drm_gem_object *obj, 477d3b8877eSJonathan Marek struct msm_gem_address_space *aspace, uint64_t *iova, 478d3b8877eSJonathan Marek u64 range_start, u64 range_end) 479c0ee9794SJordan Crouse { 480c0ee9794SJordan Crouse u64 local; 481c0ee9794SJordan Crouse int ret; 482c0ee9794SJordan Crouse 48390643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 484c0ee9794SJordan Crouse 4858117e5e5SRob Clark ret = get_iova_locked(obj, aspace, &local, 486d3b8877eSJonathan Marek range_start, range_end); 487c0ee9794SJordan Crouse 488c0ee9794SJordan Crouse if (!ret) 489c0ee9794SJordan Crouse ret = msm_gem_pin_iova(obj, aspace); 490c0ee9794SJordan Crouse 491c0ee9794SJordan Crouse if (!ret) 492c0ee9794SJordan Crouse *iova = local; 493c0ee9794SJordan Crouse 494c8afe684SRob Clark return ret; 495c8afe684SRob Clark } 496c8afe684SRob Clark 497e4b87d22SRob Clark /* 498e4b87d22SRob Clark * get iova and pin it. Should have a matching put 499e4b87d22SRob Clark * limits iova to specified range (in pages) 500e4b87d22SRob Clark */ 501e4b87d22SRob Clark int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, 502e4b87d22SRob Clark struct msm_gem_address_space *aspace, uint64_t *iova, 503e4b87d22SRob Clark u64 range_start, u64 range_end) 504e4b87d22SRob Clark { 505e4b87d22SRob Clark int ret; 506e4b87d22SRob Clark 507e4b87d22SRob Clark msm_gem_lock(obj); 508e4b87d22SRob Clark ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end); 509e4b87d22SRob Clark msm_gem_unlock(obj); 510e4b87d22SRob Clark 511e4b87d22SRob Clark return ret; 512e4b87d22SRob Clark } 513e4b87d22SRob Clark 514e4b87d22SRob Clark int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj, 515e4b87d22SRob Clark struct msm_gem_address_space *aspace, uint64_t *iova) 516e4b87d22SRob Clark { 517e4b87d22SRob Clark return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX); 518e4b87d22SRob Clark } 519e4b87d22SRob Clark 520d3b8877eSJonathan Marek /* get iova and pin it. Should have a matching put */ 521d3b8877eSJonathan Marek int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, 522d3b8877eSJonathan Marek struct msm_gem_address_space *aspace, uint64_t *iova) 523d3b8877eSJonathan Marek { 524d3b8877eSJonathan Marek return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX); 525d3b8877eSJonathan Marek } 526d3b8877eSJonathan Marek 5277ad0e8cfSJordan Crouse /* 5287ad0e8cfSJordan Crouse * Get an iova but don't pin it. Doesn't need a put because iovas are currently 5297ad0e8cfSJordan Crouse * valid for the life of the object 5307ad0e8cfSJordan Crouse */ 5319fe041f6SJordan Crouse int msm_gem_get_iova(struct drm_gem_object *obj, 5329fe041f6SJordan Crouse struct msm_gem_address_space *aspace, uint64_t *iova) 5339fe041f6SJordan Crouse { 5349fe041f6SJordan Crouse int ret; 5359fe041f6SJordan Crouse 536a6ae74c9SRob Clark msm_gem_lock(obj); 5378117e5e5SRob Clark ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX); 538a6ae74c9SRob Clark msm_gem_unlock(obj); 5399fe041f6SJordan Crouse 5409fe041f6SJordan Crouse return ret; 5419fe041f6SJordan Crouse } 5429fe041f6SJordan Crouse 5432638d90aSRob Clark /* get iova without taking a reference, used in places where you have 5449fe041f6SJordan Crouse * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova' 5452638d90aSRob Clark */ 5468bdcd949SRob Clark uint64_t msm_gem_iova(struct drm_gem_object *obj, 5478bdcd949SRob Clark struct msm_gem_address_space *aspace) 5482638d90aSRob Clark { 5494b85f7f5SRob Clark struct msm_gem_vma *vma; 5504b85f7f5SRob Clark 551a6ae74c9SRob Clark msm_gem_lock(obj); 5524b85f7f5SRob Clark vma = lookup_vma(obj, aspace); 553a6ae74c9SRob Clark msm_gem_unlock(obj); 55490643a24SRob Clark GEM_WARN_ON(!vma); 5554b85f7f5SRob Clark 5564b85f7f5SRob Clark return vma ? vma->iova : 0; 5572638d90aSRob Clark } 5582638d90aSRob Clark 5597ad0e8cfSJordan Crouse /* 560e4b87d22SRob Clark * Locked variant of msm_gem_unpin_iova() 561e4b87d22SRob Clark */ 562e4b87d22SRob Clark void msm_gem_unpin_iova_locked(struct drm_gem_object *obj, 563e4b87d22SRob Clark struct msm_gem_address_space *aspace) 564e4b87d22SRob Clark { 56564fcbde7SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 566e4b87d22SRob Clark struct msm_gem_vma *vma; 567e4b87d22SRob Clark 56890643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 569e4b87d22SRob Clark 570e4b87d22SRob Clark vma = lookup_vma(obj, aspace); 571e4b87d22SRob Clark 57264fcbde7SRob Clark if (!GEM_WARN_ON(!vma)) { 573e4b87d22SRob Clark msm_gem_unmap_vma(aspace, vma); 57464fcbde7SRob Clark 57564fcbde7SRob Clark msm_obj->pin_count--; 57664fcbde7SRob Clark GEM_WARN_ON(msm_obj->pin_count < 0); 57764fcbde7SRob Clark 57864fcbde7SRob Clark update_inactive(msm_obj); 57964fcbde7SRob Clark } 580e4b87d22SRob Clark } 581e4b87d22SRob Clark 582e4b87d22SRob Clark /* 5837ad0e8cfSJordan Crouse * Unpin a iova by updating the reference counts. The memory isn't actually 5847ad0e8cfSJordan Crouse * purged until something else (shrinker, mm_notifier, destroy, etc) decides 5857ad0e8cfSJordan Crouse * to get rid of it 5867ad0e8cfSJordan Crouse */ 5877ad0e8cfSJordan Crouse void msm_gem_unpin_iova(struct drm_gem_object *obj, 5888bdcd949SRob Clark struct msm_gem_address_space *aspace) 589c8afe684SRob Clark { 590a6ae74c9SRob Clark msm_gem_lock(obj); 591e4b87d22SRob Clark msm_gem_unpin_iova_locked(obj, aspace); 592a6ae74c9SRob Clark msm_gem_unlock(obj); 593c8afe684SRob Clark } 594c8afe684SRob Clark 595c8afe684SRob Clark int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 596c8afe684SRob Clark struct drm_mode_create_dumb *args) 597c8afe684SRob Clark { 598c8afe684SRob Clark args->pitch = align_pitch(args->width, args->bpp); 599c8afe684SRob Clark args->size = PAGE_ALIGN(args->pitch * args->height); 600c8afe684SRob Clark return msm_gem_new_handle(dev, file, args->size, 6010815d774SJordan Crouse MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb"); 602c8afe684SRob Clark } 603c8afe684SRob Clark 604c8afe684SRob Clark int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, 605c8afe684SRob Clark uint32_t handle, uint64_t *offset) 606c8afe684SRob Clark { 607c8afe684SRob Clark struct drm_gem_object *obj; 608c8afe684SRob Clark int ret = 0; 609c8afe684SRob Clark 610c8afe684SRob Clark /* GEM does all our handle to object mapping */ 611a8ad0bd8SChris Wilson obj = drm_gem_object_lookup(file, handle); 612c8afe684SRob Clark if (obj == NULL) { 613c8afe684SRob Clark ret = -ENOENT; 614c8afe684SRob Clark goto fail; 615c8afe684SRob Clark } 616c8afe684SRob Clark 617c8afe684SRob Clark *offset = msm_gem_mmap_offset(obj); 618c8afe684SRob Clark 619f7d33950SEmil Velikov drm_gem_object_put(obj); 620c8afe684SRob Clark 621c8afe684SRob Clark fail: 622c8afe684SRob Clark return ret; 623c8afe684SRob Clark } 624c8afe684SRob Clark 625fad33f4bSRob Clark static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) 626c8afe684SRob Clark { 6270e08270aSSushmita Susheelendra struct msm_gem_object *msm_obj = to_msm_bo(obj); 6280e08270aSSushmita Susheelendra int ret = 0; 6290e08270aSSushmita Susheelendra 63090643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 631e4b87d22SRob Clark 6328b6b7d84SDaniel Vetter if (obj->import_attach) 6338b6b7d84SDaniel Vetter return ERR_PTR(-ENODEV); 6348b6b7d84SDaniel Vetter 63590643a24SRob Clark if (GEM_WARN_ON(msm_obj->madv > madv)) { 6366a41da17SMamta Shukla DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n", 637fad33f4bSRob Clark msm_obj->madv, madv); 6380e08270aSSushmita Susheelendra return ERR_PTR(-EBUSY); 639c8afe684SRob Clark } 640c8afe684SRob Clark 6410e08270aSSushmita Susheelendra /* increment vmap_count *before* vmap() call, so shrinker can 642a6ae74c9SRob Clark * check vmap_count (is_vunmapable()) outside of msm_obj lock. 6430e08270aSSushmita Susheelendra * This guarantees that we won't try to msm_gem_vunmap() this 6440e08270aSSushmita Susheelendra * same object from within the vmap() call (while we already 645a6ae74c9SRob Clark * hold msm_obj lock) 6460e08270aSSushmita Susheelendra */ 6470e08270aSSushmita Susheelendra msm_obj->vmap_count++; 6480e08270aSSushmita Susheelendra 6490e08270aSSushmita Susheelendra if (!msm_obj->vaddr) { 6500e08270aSSushmita Susheelendra struct page **pages = get_pages(obj); 6510e08270aSSushmita Susheelendra if (IS_ERR(pages)) { 6520e08270aSSushmita Susheelendra ret = PTR_ERR(pages); 6530e08270aSSushmita Susheelendra goto fail; 6540e08270aSSushmita Susheelendra } 6550e08270aSSushmita Susheelendra msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, 6560e08270aSSushmita Susheelendra VM_MAP, pgprot_writecombine(PAGE_KERNEL)); 6570e08270aSSushmita Susheelendra if (msm_obj->vaddr == NULL) { 6580e08270aSSushmita Susheelendra ret = -ENOMEM; 6590e08270aSSushmita Susheelendra goto fail; 6600e08270aSSushmita Susheelendra } 66110f76165SRob Clark 66210f76165SRob Clark update_inactive(msm_obj); 6630e08270aSSushmita Susheelendra } 6640e08270aSSushmita Susheelendra 6650e08270aSSushmita Susheelendra return msm_obj->vaddr; 6660e08270aSSushmita Susheelendra 6670e08270aSSushmita Susheelendra fail: 668e1e9db2cSRob Clark msm_obj->vmap_count--; 6690e08270aSSushmita Susheelendra return ERR_PTR(ret); 67018f23049SRob Clark } 67118f23049SRob Clark 672e4b87d22SRob Clark void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj) 673e4b87d22SRob Clark { 674e4b87d22SRob Clark return get_vaddr(obj, MSM_MADV_WILLNEED); 675e4b87d22SRob Clark } 676e4b87d22SRob Clark 677fad33f4bSRob Clark void *msm_gem_get_vaddr(struct drm_gem_object *obj) 678fad33f4bSRob Clark { 679e4b87d22SRob Clark void *ret; 680e4b87d22SRob Clark 681e4b87d22SRob Clark msm_gem_lock(obj); 682e4b87d22SRob Clark ret = msm_gem_get_vaddr_locked(obj); 683e4b87d22SRob Clark msm_gem_unlock(obj); 684e4b87d22SRob Clark 685e4b87d22SRob Clark return ret; 686fad33f4bSRob Clark } 687fad33f4bSRob Clark 688fad33f4bSRob Clark /* 689fad33f4bSRob Clark * Don't use this! It is for the very special case of dumping 690fad33f4bSRob Clark * submits from GPU hangs or faults, were the bo may already 691fad33f4bSRob Clark * be MSM_MADV_DONTNEED, but we know the buffer is still on the 692fad33f4bSRob Clark * active list. 693fad33f4bSRob Clark */ 694fad33f4bSRob Clark void *msm_gem_get_vaddr_active(struct drm_gem_object *obj) 695fad33f4bSRob Clark { 696fad33f4bSRob Clark return get_vaddr(obj, __MSM_MADV_PURGED); 697fad33f4bSRob Clark } 698fad33f4bSRob Clark 699e4b87d22SRob Clark void msm_gem_put_vaddr_locked(struct drm_gem_object *obj) 70018f23049SRob Clark { 7010e08270aSSushmita Susheelendra struct msm_gem_object *msm_obj = to_msm_bo(obj); 7020e08270aSSushmita Susheelendra 70390643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 70490643a24SRob Clark GEM_WARN_ON(msm_obj->vmap_count < 1); 705e4b87d22SRob Clark 7060e08270aSSushmita Susheelendra msm_obj->vmap_count--; 7074cd33c48SRob Clark } 7080e08270aSSushmita Susheelendra 7090e08270aSSushmita Susheelendra void msm_gem_put_vaddr(struct drm_gem_object *obj) 7104cd33c48SRob Clark { 711a6ae74c9SRob Clark msm_gem_lock(obj); 712e4b87d22SRob Clark msm_gem_put_vaddr_locked(obj); 713a6ae74c9SRob Clark msm_gem_unlock(obj); 7144cd33c48SRob Clark } 7154cd33c48SRob Clark 7164cd33c48SRob Clark /* Update madvise status, returns true if not purged, else 7174cd33c48SRob Clark * false or -errno. 7184cd33c48SRob Clark */ 7194cd33c48SRob Clark int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv) 7204cd33c48SRob Clark { 7214cd33c48SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 72268209390SRob Clark 723a6ae74c9SRob Clark msm_gem_lock(obj); 724c8afe684SRob Clark 725c8afe684SRob Clark if (msm_obj->madv != __MSM_MADV_PURGED) 726c8afe684SRob Clark msm_obj->madv = madv; 727c8afe684SRob Clark 7280e08270aSSushmita Susheelendra madv = msm_obj->madv; 7290e08270aSSushmita Susheelendra 7303edfa30fSRob Clark /* If the obj is inactive, we might need to move it 7313edfa30fSRob Clark * between inactive lists 7323edfa30fSRob Clark */ 7333edfa30fSRob Clark if (msm_obj->active_count == 0) 7343edfa30fSRob Clark update_inactive(msm_obj); 7353edfa30fSRob Clark 736a6ae74c9SRob Clark msm_gem_unlock(obj); 7370e08270aSSushmita Susheelendra 7380e08270aSSushmita Susheelendra return (madv != __MSM_MADV_PURGED); 739c8afe684SRob Clark } 740c8afe684SRob Clark 741599089c6SRob Clark void msm_gem_purge(struct drm_gem_object *obj) 74268209390SRob Clark { 74368209390SRob Clark struct drm_device *dev = obj->dev; 74468209390SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 74568209390SRob Clark 74681d4d597SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 74790643a24SRob Clark GEM_WARN_ON(!is_purgeable(msm_obj)); 74868209390SRob Clark 74920d0ae2fSRob Clark /* Get rid of any iommu mapping(s): */ 75020d0ae2fSRob Clark put_iova_spaces(obj, true); 7510e08270aSSushmita Susheelendra 752599089c6SRob Clark msm_gem_vunmap(obj); 75368209390SRob Clark 75481d4d597SRob Clark drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); 75581d4d597SRob Clark 75668209390SRob Clark put_pages(obj); 75768209390SRob Clark 7589b73bde3SIskren Chernev put_iova_vmas(obj); 7599b73bde3SIskren Chernev 76068209390SRob Clark msm_obj->madv = __MSM_MADV_PURGED; 76125ed38b3SRob Clark update_inactive(msm_obj); 76268209390SRob Clark 76368209390SRob Clark drm_gem_free_mmap_offset(obj); 76468209390SRob Clark 76568209390SRob Clark /* Our goal here is to return as much of the memory as 76668209390SRob Clark * is possible back to the system as we are called from OOM. 76768209390SRob Clark * To do this we must instruct the shmfs to drop all of its 76868209390SRob Clark * backing pages, *now*. 76968209390SRob Clark */ 77068209390SRob Clark shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); 77168209390SRob Clark 77268209390SRob Clark invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 77368209390SRob Clark 0, (loff_t)-1); 77468209390SRob Clark } 77568209390SRob Clark 77663f17ef8SRob Clark /** 77763f17ef8SRob Clark * Unpin the backing pages and make them available to be swapped out. 77863f17ef8SRob Clark */ 77963f17ef8SRob Clark void msm_gem_evict(struct drm_gem_object *obj) 78063f17ef8SRob Clark { 78163f17ef8SRob Clark struct drm_device *dev = obj->dev; 78263f17ef8SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 78363f17ef8SRob Clark 78463f17ef8SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 78563f17ef8SRob Clark GEM_WARN_ON(is_unevictable(msm_obj)); 78663f17ef8SRob Clark GEM_WARN_ON(!msm_obj->evictable); 78763f17ef8SRob Clark GEM_WARN_ON(msm_obj->active_count); 78863f17ef8SRob Clark 78963f17ef8SRob Clark /* Get rid of any iommu mapping(s): */ 79063f17ef8SRob Clark put_iova_spaces(obj, false); 79163f17ef8SRob Clark 79263f17ef8SRob Clark drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); 79363f17ef8SRob Clark 79463f17ef8SRob Clark put_pages(obj); 79563f17ef8SRob Clark 79663f17ef8SRob Clark update_inactive(msm_obj); 79763f17ef8SRob Clark } 79863f17ef8SRob Clark 799599089c6SRob Clark void msm_gem_vunmap(struct drm_gem_object *obj) 800e1e9db2cSRob Clark { 801e1e9db2cSRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 802e1e9db2cSRob Clark 80390643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 8040e08270aSSushmita Susheelendra 80590643a24SRob Clark if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj))) 806e1e9db2cSRob Clark return; 807e1e9db2cSRob Clark 808e1e9db2cSRob Clark vunmap(msm_obj->vaddr); 809e1e9db2cSRob Clark msm_obj->vaddr = NULL; 810e1e9db2cSRob Clark } 811e1e9db2cSRob Clark 812b6295f9aSRob Clark /* must be called before _move_to_active().. */ 813b6295f9aSRob Clark int msm_gem_sync_object(struct drm_gem_object *obj, 814b6295f9aSRob Clark struct msm_fence_context *fctx, bool exclusive) 815b6295f9aSRob Clark { 81652791eeeSChristian König struct dma_resv_list *fobj; 817f54d1867SChris Wilson struct dma_fence *fence; 818b6295f9aSRob Clark int i, ret; 819b6295f9aSRob Clark 82052791eeeSChristian König fobj = dma_resv_get_list(obj->resv); 821b6295f9aSRob Clark if (!fobj || (fobj->shared_count == 0)) { 822*6edbd6abSChristian König fence = dma_resv_excl_fence(obj->resv); 823b6295f9aSRob Clark /* don't need to wait on our own fences, since ring is fifo */ 824b6295f9aSRob Clark if (fence && (fence->context != fctx->context)) { 825f54d1867SChris Wilson ret = dma_fence_wait(fence, true); 826b6295f9aSRob Clark if (ret) 827b6295f9aSRob Clark return ret; 828b6295f9aSRob Clark } 829b6295f9aSRob Clark } 830b6295f9aSRob Clark 831b6295f9aSRob Clark if (!exclusive || !fobj) 832b6295f9aSRob Clark return 0; 833b6295f9aSRob Clark 834b6295f9aSRob Clark for (i = 0; i < fobj->shared_count; i++) { 835b6295f9aSRob Clark fence = rcu_dereference_protected(fobj->shared[i], 83652791eeeSChristian König dma_resv_held(obj->resv)); 837b6295f9aSRob Clark if (fence->context != fctx->context) { 838f54d1867SChris Wilson ret = dma_fence_wait(fence, true); 839b6295f9aSRob Clark if (ret) 840b6295f9aSRob Clark return ret; 841b6295f9aSRob Clark } 842b6295f9aSRob Clark } 843b6295f9aSRob Clark 844b6295f9aSRob Clark return 0; 845b6295f9aSRob Clark } 846b6295f9aSRob Clark 8479d8baa2bSAkhil P Oommen void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu) 8487198e6b0SRob Clark { 8497198e6b0SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 850d984457bSRob Clark struct msm_drm_private *priv = obj->dev->dev_private; 851d984457bSRob Clark 852d984457bSRob Clark might_sleep(); 85390643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 85490643a24SRob Clark GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); 85590643a24SRob Clark GEM_WARN_ON(msm_obj->dontneed); 85664fcbde7SRob Clark GEM_WARN_ON(!msm_obj->sgt); 8579d8baa2bSAkhil P Oommen 858ab5c54cbSRob Clark if (msm_obj->active_count++ == 0) { 859d984457bSRob Clark mutex_lock(&priv->mm_lock); 86064fcbde7SRob Clark if (msm_obj->evictable) 86164fcbde7SRob Clark mark_unevictable(msm_obj); 862cc8a4d5aSRob Clark list_del(&msm_obj->mm_list); 8637198e6b0SRob Clark list_add_tail(&msm_obj->mm_list, &gpu->active_list); 864d984457bSRob Clark mutex_unlock(&priv->mm_lock); 8657198e6b0SRob Clark } 8669d8baa2bSAkhil P Oommen } 8677198e6b0SRob Clark 8689d8baa2bSAkhil P Oommen void msm_gem_active_put(struct drm_gem_object *obj) 8697198e6b0SRob Clark { 8707198e6b0SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 8717198e6b0SRob Clark 872d984457bSRob Clark might_sleep(); 87390643a24SRob Clark GEM_WARN_ON(!msm_gem_is_locked(obj)); 8747198e6b0SRob Clark 875ab5c54cbSRob Clark if (--msm_obj->active_count == 0) { 8763edfa30fSRob Clark update_inactive(msm_obj); 8777198e6b0SRob Clark } 8789d8baa2bSAkhil P Oommen } 8797198e6b0SRob Clark 8803edfa30fSRob Clark static void update_inactive(struct msm_gem_object *msm_obj) 8813edfa30fSRob Clark { 8823edfa30fSRob Clark struct msm_drm_private *priv = msm_obj->base.dev->dev_private; 8833edfa30fSRob Clark 88464fcbde7SRob Clark GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base)); 88564fcbde7SRob Clark 88664fcbde7SRob Clark if (msm_obj->active_count != 0) 88764fcbde7SRob Clark return; 88864fcbde7SRob Clark 8893edfa30fSRob Clark mutex_lock(&priv->mm_lock); 8903edfa30fSRob Clark 891cc8a4d5aSRob Clark if (msm_obj->dontneed) 8920054eeb7SRob Clark mark_unpurgeable(msm_obj); 89364fcbde7SRob Clark if (msm_obj->evictable) 89464fcbde7SRob Clark mark_unevictable(msm_obj); 895cc8a4d5aSRob Clark 896cc8a4d5aSRob Clark list_del(&msm_obj->mm_list); 89764fcbde7SRob Clark if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) { 8983edfa30fSRob Clark list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); 89964fcbde7SRob Clark mark_evictable(msm_obj); 900cc8a4d5aSRob Clark } else if (msm_obj->madv == MSM_MADV_DONTNEED) { 9013edfa30fSRob Clark list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed); 9020054eeb7SRob Clark mark_purgeable(msm_obj); 903cc8a4d5aSRob Clark } else { 90464fcbde7SRob Clark GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt); 90564fcbde7SRob Clark list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned); 906cc8a4d5aSRob Clark } 9073edfa30fSRob Clark 9083edfa30fSRob Clark mutex_unlock(&priv->mm_lock); 9093edfa30fSRob Clark } 9103edfa30fSRob Clark 911ba00c3f2SRob Clark int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) 912ba00c3f2SRob Clark { 913b6295f9aSRob Clark bool write = !!(op & MSM_PREP_WRITE); 914f755e227SChris Wilson unsigned long remain = 915f755e227SChris Wilson op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); 916f755e227SChris Wilson long ret; 917b6295f9aSRob Clark 91852791eeeSChristian König ret = dma_resv_wait_timeout_rcu(obj->resv, write, 919f755e227SChris Wilson true, remain); 920f755e227SChris Wilson if (ret == 0) 921f755e227SChris Wilson return remain == 0 ? -EBUSY : -ETIMEDOUT; 922f755e227SChris Wilson else if (ret < 0) 923f755e227SChris Wilson return ret; 924ba00c3f2SRob Clark 9257198e6b0SRob Clark /* TODO cache maintenance */ 9267198e6b0SRob Clark 927b6295f9aSRob Clark return 0; 9287198e6b0SRob Clark } 9297198e6b0SRob Clark 9307198e6b0SRob Clark int msm_gem_cpu_fini(struct drm_gem_object *obj) 9317198e6b0SRob Clark { 9327198e6b0SRob Clark /* TODO cache maintenance */ 933c8afe684SRob Clark return 0; 934c8afe684SRob Clark } 935c8afe684SRob Clark 936c8afe684SRob Clark #ifdef CONFIG_DEBUG_FS 937f54d1867SChris Wilson static void describe_fence(struct dma_fence *fence, const char *type, 938b6295f9aSRob Clark struct seq_file *m) 939b6295f9aSRob Clark { 940f54d1867SChris Wilson if (!dma_fence_is_signaled(fence)) 941a3115621SDave Airlie seq_printf(m, "\t%9s: %s %s seq %llu\n", type, 942b6295f9aSRob Clark fence->ops->get_driver_name(fence), 943b6295f9aSRob Clark fence->ops->get_timeline_name(fence), 944b6295f9aSRob Clark fence->seqno); 945b6295f9aSRob Clark } 946b6295f9aSRob Clark 947528107c8SRob Clark void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, 948528107c8SRob Clark struct msm_gem_stats *stats) 949c8afe684SRob Clark { 950c8afe684SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 95152791eeeSChristian König struct dma_resv *robj = obj->resv; 95252791eeeSChristian König struct dma_resv_list *fobj; 953f54d1867SChris Wilson struct dma_fence *fence; 9544b85f7f5SRob Clark struct msm_gem_vma *vma; 955c8afe684SRob Clark uint64_t off = drm_vma_node_start(&obj->vma_node); 9564cd33c48SRob Clark const char *madv; 957c8afe684SRob Clark 958a6ae74c9SRob Clark msm_gem_lock(obj); 959b6295f9aSRob Clark 960528107c8SRob Clark stats->all.count++; 961528107c8SRob Clark stats->all.size += obj->size; 962528107c8SRob Clark 963528107c8SRob Clark if (is_active(msm_obj)) { 964528107c8SRob Clark stats->active.count++; 965528107c8SRob Clark stats->active.size += obj->size; 966528107c8SRob Clark } 967528107c8SRob Clark 968f48f3563SRob Clark if (msm_obj->pages) { 969f48f3563SRob Clark stats->resident.count++; 970f48f3563SRob Clark stats->resident.size += obj->size; 971f48f3563SRob Clark } 972f48f3563SRob Clark 9734cd33c48SRob Clark switch (msm_obj->madv) { 9744cd33c48SRob Clark case __MSM_MADV_PURGED: 975528107c8SRob Clark stats->purged.count++; 976528107c8SRob Clark stats->purged.size += obj->size; 9774cd33c48SRob Clark madv = " purged"; 9784cd33c48SRob Clark break; 9794cd33c48SRob Clark case MSM_MADV_DONTNEED: 9800054eeb7SRob Clark stats->purgeable.count++; 9810054eeb7SRob Clark stats->purgeable.size += obj->size; 9824cd33c48SRob Clark madv = " purgeable"; 9834cd33c48SRob Clark break; 9844cd33c48SRob Clark case MSM_MADV_WILLNEED: 9854cd33c48SRob Clark default: 9864cd33c48SRob Clark madv = ""; 9874cd33c48SRob Clark break; 9884cd33c48SRob Clark } 9894cd33c48SRob Clark 990575f0485SJordan Crouse seq_printf(m, "%08x: %c %2d (%2d) %08llx %p", 9917198e6b0SRob Clark msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', 9922c935bc5SPeter Zijlstra obj->name, kref_read(&obj->refcount), 993667ce33eSRob Clark off, msm_obj->vaddr); 994667ce33eSRob Clark 9950815d774SJordan Crouse seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name); 996667ce33eSRob Clark 997575f0485SJordan Crouse if (!list_empty(&msm_obj->vmas)) { 998575f0485SJordan Crouse 999575f0485SJordan Crouse seq_puts(m, " vmas:"); 1000575f0485SJordan Crouse 100125faf2f2SRob Clark list_for_each_entry(vma, &msm_obj->vmas, list) { 100225faf2f2SRob Clark const char *name, *comm; 100325faf2f2SRob Clark if (vma->aspace) { 100425faf2f2SRob Clark struct msm_gem_address_space *aspace = vma->aspace; 100525faf2f2SRob Clark struct task_struct *task = 100625faf2f2SRob Clark get_pid_task(aspace->pid, PIDTYPE_PID); 100725faf2f2SRob Clark if (task) { 100825faf2f2SRob Clark comm = kstrdup(task->comm, GFP_KERNEL); 100925faf2f2SRob Clark } else { 101025faf2f2SRob Clark comm = NULL; 101125faf2f2SRob Clark } 101225faf2f2SRob Clark name = aspace->name; 101325faf2f2SRob Clark } else { 101425faf2f2SRob Clark name = comm = NULL; 101525faf2f2SRob Clark } 101625faf2f2SRob Clark seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]", 101725faf2f2SRob Clark name, comm ? ":" : "", comm ? comm : "", 101825faf2f2SRob Clark vma->aspace, vma->iova, 101925faf2f2SRob Clark vma->mapped ? "mapped" : "unmapped", 10207ad0e8cfSJordan Crouse vma->inuse); 102125faf2f2SRob Clark kfree(comm); 102225faf2f2SRob Clark } 1023575f0485SJordan Crouse 1024575f0485SJordan Crouse seq_puts(m, "\n"); 1025575f0485SJordan Crouse } 1026b6295f9aSRob Clark 1027b6295f9aSRob Clark rcu_read_lock(); 1028b6295f9aSRob Clark fobj = rcu_dereference(robj->fence); 1029b6295f9aSRob Clark if (fobj) { 1030b6295f9aSRob Clark unsigned int i, shared_count = fobj->shared_count; 1031b6295f9aSRob Clark 1032b6295f9aSRob Clark for (i = 0; i < shared_count; i++) { 1033b6295f9aSRob Clark fence = rcu_dereference(fobj->shared[i]); 1034b6295f9aSRob Clark describe_fence(fence, "Shared", m); 1035b6295f9aSRob Clark } 1036b6295f9aSRob Clark } 1037b6295f9aSRob Clark 1038*6edbd6abSChristian König fence = dma_resv_excl_fence(robj); 1039b6295f9aSRob Clark if (fence) 1040b6295f9aSRob Clark describe_fence(fence, "Exclusive", m); 1041b6295f9aSRob Clark rcu_read_unlock(); 10420e08270aSSushmita Susheelendra 1043a6ae74c9SRob Clark msm_gem_unlock(obj); 1044c8afe684SRob Clark } 1045c8afe684SRob Clark 1046c8afe684SRob Clark void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) 1047c8afe684SRob Clark { 1048528107c8SRob Clark struct msm_gem_stats stats = {}; 1049c8afe684SRob Clark struct msm_gem_object *msm_obj; 1050c8afe684SRob Clark 10510815d774SJordan Crouse seq_puts(m, " flags id ref offset kaddr size madv name\n"); 10526ed0897cSRob Clark list_for_each_entry(msm_obj, list, node) { 1053c8afe684SRob Clark struct drm_gem_object *obj = &msm_obj->base; 1054575f0485SJordan Crouse seq_puts(m, " "); 1055528107c8SRob Clark msm_gem_describe(obj, m, &stats); 1056c8afe684SRob Clark } 1057c8afe684SRob Clark 1058528107c8SRob Clark seq_printf(m, "Total: %4d objects, %9zu bytes\n", 1059528107c8SRob Clark stats.all.count, stats.all.size); 1060528107c8SRob Clark seq_printf(m, "Active: %4d objects, %9zu bytes\n", 1061528107c8SRob Clark stats.active.count, stats.active.size); 1062f48f3563SRob Clark seq_printf(m, "Resident: %4d objects, %9zu bytes\n", 1063f48f3563SRob Clark stats.resident.count, stats.resident.size); 1064f1902c6bSColin Ian King seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n", 10650054eeb7SRob Clark stats.purgeable.count, stats.purgeable.size); 1066528107c8SRob Clark seq_printf(m, "Purged: %4d objects, %9zu bytes\n", 1067528107c8SRob Clark stats.purged.count, stats.purged.size); 1068c8afe684SRob Clark } 1069c8afe684SRob Clark #endif 1070c8afe684SRob Clark 1071eecd7fd8SEmil Velikov /* don't call directly! Use drm_gem_object_put_locked() and friends */ 1072c8afe684SRob Clark void msm_gem_free_object(struct drm_gem_object *obj) 1073c8afe684SRob Clark { 1074c8afe684SRob Clark struct msm_gem_object *msm_obj = to_msm_bo(obj); 107548e7f183SKristian H. Kristensen struct drm_device *dev = obj->dev; 107648e7f183SKristian H. Kristensen struct msm_drm_private *priv = dev->dev_private; 107748e7f183SKristian H. Kristensen 10786ed0897cSRob Clark mutex_lock(&priv->obj_lock); 10796ed0897cSRob Clark list_del(&msm_obj->node); 10806ed0897cSRob Clark mutex_unlock(&priv->obj_lock); 10816ed0897cSRob Clark 1082d984457bSRob Clark mutex_lock(&priv->mm_lock); 1083cc8a4d5aSRob Clark if (msm_obj->dontneed) 10840054eeb7SRob Clark mark_unpurgeable(msm_obj); 1085c8afe684SRob Clark list_del(&msm_obj->mm_list); 1086d984457bSRob Clark mutex_unlock(&priv->mm_lock); 1087c8afe684SRob Clark 1088a6ae74c9SRob Clark msm_gem_lock(obj); 1089c8afe684SRob Clark 1090c8afe684SRob Clark /* object should not be on active list: */ 109190643a24SRob Clark GEM_WARN_ON(is_active(msm_obj)); 1092c8afe684SRob Clark 109320d0ae2fSRob Clark put_iova_spaces(obj, true); 1094c8afe684SRob Clark 109505b84911SRob Clark if (obj->import_attach) { 109690643a24SRob Clark GEM_WARN_ON(msm_obj->vaddr); 109705b84911SRob Clark 109805b84911SRob Clark /* Don't drop the pages for imported dmabuf, as they are not 109905b84911SRob Clark * ours, just free the array we allocated: 110005b84911SRob Clark */ 11012098105eSMichal Hocko kvfree(msm_obj->pages); 110205b84911SRob Clark 110357f04815SRob Clark put_iova_vmas(obj); 110457f04815SRob Clark 11056c0e3ea2SRob Clark /* dma_buf_detach() grabs resv lock, so we need to unlock 11066c0e3ea2SRob Clark * prior to drm_prime_gem_destroy 11076c0e3ea2SRob Clark */ 11086c0e3ea2SRob Clark msm_gem_unlock(obj); 11096c0e3ea2SRob Clark 1110f28730c8Sjilai wang drm_prime_gem_destroy(obj, msm_obj->sgt); 111105b84911SRob Clark } else { 1112599089c6SRob Clark msm_gem_vunmap(obj); 1113c8afe684SRob Clark put_pages(obj); 111457f04815SRob Clark put_iova_vmas(obj); 11156c0e3ea2SRob Clark msm_gem_unlock(obj); 111605b84911SRob Clark } 1117c8afe684SRob Clark 1118c8afe684SRob Clark drm_gem_object_release(obj); 1119c8afe684SRob Clark 1120c8afe684SRob Clark kfree(msm_obj); 1121c8afe684SRob Clark } 1122c8afe684SRob Clark 1123c8afe684SRob Clark /* convenience method to construct a GEM buffer object, and userspace handle */ 1124c8afe684SRob Clark int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, 11250815d774SJordan Crouse uint32_t size, uint32_t flags, uint32_t *handle, 11260815d774SJordan Crouse char *name) 1127c8afe684SRob Clark { 1128c8afe684SRob Clark struct drm_gem_object *obj; 1129c8afe684SRob Clark int ret; 1130c8afe684SRob Clark 1131c8afe684SRob Clark obj = msm_gem_new(dev, size, flags); 1132c8afe684SRob Clark 1133c8afe684SRob Clark if (IS_ERR(obj)) 1134c8afe684SRob Clark return PTR_ERR(obj); 1135c8afe684SRob Clark 11360815d774SJordan Crouse if (name) 11370815d774SJordan Crouse msm_gem_object_set_name(obj, "%s", name); 11380815d774SJordan Crouse 1139c8afe684SRob Clark ret = drm_gem_handle_create(file, obj, handle); 1140c8afe684SRob Clark 1141c8afe684SRob Clark /* drop reference from allocate - handle holds it now */ 1142f7d33950SEmil Velikov drm_gem_object_put(obj); 1143c8afe684SRob Clark 1144c8afe684SRob Clark return ret; 1145c8afe684SRob Clark } 1146c8afe684SRob Clark 11473c9edd9cSThomas Zimmermann static const struct vm_operations_struct vm_ops = { 11483c9edd9cSThomas Zimmermann .fault = msm_gem_fault, 11493c9edd9cSThomas Zimmermann .open = drm_gem_vm_open, 11503c9edd9cSThomas Zimmermann .close = drm_gem_vm_close, 11513c9edd9cSThomas Zimmermann }; 11523c9edd9cSThomas Zimmermann 11533c9edd9cSThomas Zimmermann static const struct drm_gem_object_funcs msm_gem_object_funcs = { 11543c9edd9cSThomas Zimmermann .free = msm_gem_free_object, 11553c9edd9cSThomas Zimmermann .pin = msm_gem_prime_pin, 11563c9edd9cSThomas Zimmermann .unpin = msm_gem_prime_unpin, 11573c9edd9cSThomas Zimmermann .get_sg_table = msm_gem_prime_get_sg_table, 11583c9edd9cSThomas Zimmermann .vmap = msm_gem_prime_vmap, 11593c9edd9cSThomas Zimmermann .vunmap = msm_gem_prime_vunmap, 11603c9edd9cSThomas Zimmermann .vm_ops = &vm_ops, 11613c9edd9cSThomas Zimmermann }; 11623c9edd9cSThomas Zimmermann 116305b84911SRob Clark static int msm_gem_new_impl(struct drm_device *dev, 116405b84911SRob Clark uint32_t size, uint32_t flags, 11653cbdc8d8SAkhil P Oommen struct drm_gem_object **obj) 1166c8afe684SRob Clark { 1167c8afe684SRob Clark struct msm_gem_object *msm_obj; 1168c8afe684SRob Clark 1169c8afe684SRob Clark switch (flags & MSM_BO_CACHE_MASK) { 1170c8afe684SRob Clark case MSM_BO_UNCACHED: 1171c8afe684SRob Clark case MSM_BO_CACHED: 1172c8afe684SRob Clark case MSM_BO_WC: 1173c8afe684SRob Clark break; 1174c8afe684SRob Clark default: 11756a41da17SMamta Shukla DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", 1176c8afe684SRob Clark (flags & MSM_BO_CACHE_MASK)); 117705b84911SRob Clark return -EINVAL; 1178c8afe684SRob Clark } 1179c8afe684SRob Clark 1180667ce33eSRob Clark msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); 118105b84911SRob Clark if (!msm_obj) 118205b84911SRob Clark return -ENOMEM; 1183c8afe684SRob Clark 1184c8afe684SRob Clark msm_obj->flags = flags; 11854cd33c48SRob Clark msm_obj->madv = MSM_MADV_WILLNEED; 1186c8afe684SRob Clark 11877198e6b0SRob Clark INIT_LIST_HEAD(&msm_obj->submit_entry); 11884b85f7f5SRob Clark INIT_LIST_HEAD(&msm_obj->vmas); 11894b85f7f5SRob Clark 119005b84911SRob Clark *obj = &msm_obj->base; 11913c9edd9cSThomas Zimmermann (*obj)->funcs = &msm_gem_object_funcs; 119205b84911SRob Clark 119305b84911SRob Clark return 0; 119405b84911SRob Clark } 119505b84911SRob Clark 11960e08270aSSushmita Susheelendra static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, 11970e08270aSSushmita Susheelendra uint32_t size, uint32_t flags, bool struct_mutex_locked) 119805b84911SRob Clark { 1199f4839bd5SRob Clark struct msm_drm_private *priv = dev->dev_private; 12003cbdc8d8SAkhil P Oommen struct msm_gem_object *msm_obj; 1201871d812aSRob Clark struct drm_gem_object *obj = NULL; 1202f4839bd5SRob Clark bool use_vram = false; 120305b84911SRob Clark int ret; 120405b84911SRob Clark 120505b84911SRob Clark size = PAGE_ALIGN(size); 120605b84911SRob Clark 1207c2052a4eSJonathan Marek if (!msm_use_mmu(dev)) 1208f4839bd5SRob Clark use_vram = true; 120986f46f25SJonathan Marek else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size) 1210f4839bd5SRob Clark use_vram = true; 1211f4839bd5SRob Clark 121290643a24SRob Clark if (GEM_WARN_ON(use_vram && !priv->vram.size)) 1213f4839bd5SRob Clark return ERR_PTR(-EINVAL); 1214f4839bd5SRob Clark 12151a5dff5dSJordan Crouse /* Disallow zero sized objects as they make the underlying 12161a5dff5dSJordan Crouse * infrastructure grumpy 12171a5dff5dSJordan Crouse */ 12181a5dff5dSJordan Crouse if (size == 0) 12191a5dff5dSJordan Crouse return ERR_PTR(-EINVAL); 12201a5dff5dSJordan Crouse 12213cbdc8d8SAkhil P Oommen ret = msm_gem_new_impl(dev, size, flags, &obj); 122205b84911SRob Clark if (ret) 122305b84911SRob Clark goto fail; 122405b84911SRob Clark 12253cbdc8d8SAkhil P Oommen msm_obj = to_msm_bo(obj); 12263cbdc8d8SAkhil P Oommen 1227f4839bd5SRob Clark if (use_vram) { 12284b85f7f5SRob Clark struct msm_gem_vma *vma; 1229f4839bd5SRob Clark struct page **pages; 1230b3949a9aSHans Verkuil 1231a694ffedSIskren Chernev drm_gem_private_object_init(dev, obj, size); 1232a694ffedSIskren Chernev 1233a6ae74c9SRob Clark msm_gem_lock(obj); 1234f4839bd5SRob Clark 12354b85f7f5SRob Clark vma = add_vma(obj, NULL); 1236a6ae74c9SRob Clark msm_gem_unlock(obj); 12374b85f7f5SRob Clark if (IS_ERR(vma)) { 12384b85f7f5SRob Clark ret = PTR_ERR(vma); 12394b85f7f5SRob Clark goto fail; 12404b85f7f5SRob Clark } 12414b85f7f5SRob Clark 12424b85f7f5SRob Clark to_msm_bo(obj)->vram_node = &vma->node; 12434b85f7f5SRob Clark 124407fcad0dSIskren Chernev msm_gem_lock(obj); 1245f4839bd5SRob Clark pages = get_pages(obj); 124607fcad0dSIskren Chernev msm_gem_unlock(obj); 1247f4839bd5SRob Clark if (IS_ERR(pages)) { 1248f4839bd5SRob Clark ret = PTR_ERR(pages); 1249f4839bd5SRob Clark goto fail; 1250f4839bd5SRob Clark } 12514b85f7f5SRob Clark 12524b85f7f5SRob Clark vma->iova = physaddr(obj); 1253f4839bd5SRob Clark } else { 125405b84911SRob Clark ret = drm_gem_object_init(dev, obj, size); 125505b84911SRob Clark if (ret) 125605b84911SRob Clark goto fail; 12570abdba47SLucas Stach /* 12580abdba47SLucas Stach * Our buffers are kept pinned, so allocating them from the 12590abdba47SLucas Stach * MOVABLE zone is a really bad idea, and conflicts with CMA. 12600abdba47SLucas Stach * See comments above new_inode() why this is required _and_ 12610abdba47SLucas Stach * expected if you're going to pin these pages. 12620abdba47SLucas Stach */ 12630abdba47SLucas Stach mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER); 1264871d812aSRob Clark } 126505b84911SRob Clark 1266d984457bSRob Clark mutex_lock(&priv->mm_lock); 126764fcbde7SRob Clark list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned); 1268d984457bSRob Clark mutex_unlock(&priv->mm_lock); 12693cbdc8d8SAkhil P Oommen 12706ed0897cSRob Clark mutex_lock(&priv->obj_lock); 12716ed0897cSRob Clark list_add_tail(&msm_obj->node, &priv->objects); 12726ed0897cSRob Clark mutex_unlock(&priv->obj_lock); 12736ed0897cSRob Clark 127405b84911SRob Clark return obj; 127505b84911SRob Clark 127605b84911SRob Clark fail: 1277ce0a9dc0SRob Clark if (struct_mutex_locked) { 1278ce0a9dc0SRob Clark drm_gem_object_put_locked(obj); 1279ce0a9dc0SRob Clark } else { 1280f7d33950SEmil Velikov drm_gem_object_put(obj); 1281ce0a9dc0SRob Clark } 128205b84911SRob Clark return ERR_PTR(ret); 128305b84911SRob Clark } 128405b84911SRob Clark 12850e08270aSSushmita Susheelendra struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev, 12860e08270aSSushmita Susheelendra uint32_t size, uint32_t flags) 12870e08270aSSushmita Susheelendra { 12880e08270aSSushmita Susheelendra return _msm_gem_new(dev, size, flags, true); 12890e08270aSSushmita Susheelendra } 12900e08270aSSushmita Susheelendra 12910e08270aSSushmita Susheelendra struct drm_gem_object *msm_gem_new(struct drm_device *dev, 12920e08270aSSushmita Susheelendra uint32_t size, uint32_t flags) 12930e08270aSSushmita Susheelendra { 12940e08270aSSushmita Susheelendra return _msm_gem_new(dev, size, flags, false); 12950e08270aSSushmita Susheelendra } 12960e08270aSSushmita Susheelendra 129705b84911SRob Clark struct drm_gem_object *msm_gem_import(struct drm_device *dev, 129879f0e202SRob Clark struct dma_buf *dmabuf, struct sg_table *sgt) 129905b84911SRob Clark { 13003cbdc8d8SAkhil P Oommen struct msm_drm_private *priv = dev->dev_private; 130105b84911SRob Clark struct msm_gem_object *msm_obj; 130205b84911SRob Clark struct drm_gem_object *obj; 130379f0e202SRob Clark uint32_t size; 130405b84911SRob Clark int ret, npages; 130505b84911SRob Clark 1306871d812aSRob Clark /* if we don't have IOMMU, don't bother pretending we can import: */ 1307c2052a4eSJonathan Marek if (!msm_use_mmu(dev)) { 13086a41da17SMamta Shukla DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n"); 1309871d812aSRob Clark return ERR_PTR(-EINVAL); 1310871d812aSRob Clark } 1311871d812aSRob Clark 131279f0e202SRob Clark size = PAGE_ALIGN(dmabuf->size); 131305b84911SRob Clark 13143cbdc8d8SAkhil P Oommen ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); 131505b84911SRob Clark if (ret) 131605b84911SRob Clark goto fail; 131705b84911SRob Clark 131805b84911SRob Clark drm_gem_private_object_init(dev, obj, size); 131905b84911SRob Clark 132005b84911SRob Clark npages = size / PAGE_SIZE; 132105b84911SRob Clark 132205b84911SRob Clark msm_obj = to_msm_bo(obj); 1323a6ae74c9SRob Clark msm_gem_lock(obj); 132405b84911SRob Clark msm_obj->sgt = sgt; 13252098105eSMichal Hocko msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 132605b84911SRob Clark if (!msm_obj->pages) { 1327a6ae74c9SRob Clark msm_gem_unlock(obj); 132805b84911SRob Clark ret = -ENOMEM; 132905b84911SRob Clark goto fail; 133005b84911SRob Clark } 133105b84911SRob Clark 1332c67e6279SChristian König ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages); 13330e08270aSSushmita Susheelendra if (ret) { 1334a6ae74c9SRob Clark msm_gem_unlock(obj); 133505b84911SRob Clark goto fail; 13360e08270aSSushmita Susheelendra } 133705b84911SRob Clark 1338a6ae74c9SRob Clark msm_gem_unlock(obj); 13393cbdc8d8SAkhil P Oommen 1340d984457bSRob Clark mutex_lock(&priv->mm_lock); 134164fcbde7SRob Clark list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned); 1342d984457bSRob Clark mutex_unlock(&priv->mm_lock); 13433cbdc8d8SAkhil P Oommen 13446ed0897cSRob Clark mutex_lock(&priv->obj_lock); 13456ed0897cSRob Clark list_add_tail(&msm_obj->node, &priv->objects); 13466ed0897cSRob Clark mutex_unlock(&priv->obj_lock); 13476ed0897cSRob Clark 1348c8afe684SRob Clark return obj; 1349c8afe684SRob Clark 1350c8afe684SRob Clark fail: 1351f7d33950SEmil Velikov drm_gem_object_put(obj); 1352c8afe684SRob Clark return ERR_PTR(ret); 1353c8afe684SRob Clark } 13548223286dSJordan Crouse 13558223286dSJordan Crouse static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, 13568223286dSJordan Crouse uint32_t flags, struct msm_gem_address_space *aspace, 13578223286dSJordan Crouse struct drm_gem_object **bo, uint64_t *iova, bool locked) 13588223286dSJordan Crouse { 13598223286dSJordan Crouse void *vaddr; 13608223286dSJordan Crouse struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked); 13618223286dSJordan Crouse int ret; 13628223286dSJordan Crouse 13638223286dSJordan Crouse if (IS_ERR(obj)) 13648223286dSJordan Crouse return ERR_CAST(obj); 13658223286dSJordan Crouse 13668223286dSJordan Crouse if (iova) { 13679fe041f6SJordan Crouse ret = msm_gem_get_and_pin_iova(obj, aspace, iova); 136893f7abf1SJordan Crouse if (ret) 136993f7abf1SJordan Crouse goto err; 13708223286dSJordan Crouse } 13718223286dSJordan Crouse 13728223286dSJordan Crouse vaddr = msm_gem_get_vaddr(obj); 1373c9811d0fSWei Yongjun if (IS_ERR(vaddr)) { 13747ad0e8cfSJordan Crouse msm_gem_unpin_iova(obj, aspace); 137593f7abf1SJordan Crouse ret = PTR_ERR(vaddr); 137693f7abf1SJordan Crouse goto err; 13778223286dSJordan Crouse } 13788223286dSJordan Crouse 13798223286dSJordan Crouse if (bo) 13808223286dSJordan Crouse *bo = obj; 13818223286dSJordan Crouse 13828223286dSJordan Crouse return vaddr; 138393f7abf1SJordan Crouse err: 138493f7abf1SJordan Crouse if (locked) 1385eecd7fd8SEmil Velikov drm_gem_object_put_locked(obj); 138693f7abf1SJordan Crouse else 1387f7d33950SEmil Velikov drm_gem_object_put(obj); 138893f7abf1SJordan Crouse 138993f7abf1SJordan Crouse return ERR_PTR(ret); 139093f7abf1SJordan Crouse 13918223286dSJordan Crouse } 13928223286dSJordan Crouse 13938223286dSJordan Crouse void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, 13948223286dSJordan Crouse uint32_t flags, struct msm_gem_address_space *aspace, 13958223286dSJordan Crouse struct drm_gem_object **bo, uint64_t *iova) 13968223286dSJordan Crouse { 13978223286dSJordan Crouse return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false); 13988223286dSJordan Crouse } 13998223286dSJordan Crouse 14008223286dSJordan Crouse void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size, 14018223286dSJordan Crouse uint32_t flags, struct msm_gem_address_space *aspace, 14028223286dSJordan Crouse struct drm_gem_object **bo, uint64_t *iova) 14038223286dSJordan Crouse { 14048223286dSJordan Crouse return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true); 14058223286dSJordan Crouse } 14061e29dff0SJordan Crouse 14071e29dff0SJordan Crouse void msm_gem_kernel_put(struct drm_gem_object *bo, 14081e29dff0SJordan Crouse struct msm_gem_address_space *aspace, bool locked) 14091e29dff0SJordan Crouse { 14101e29dff0SJordan Crouse if (IS_ERR_OR_NULL(bo)) 14111e29dff0SJordan Crouse return; 14121e29dff0SJordan Crouse 14131e29dff0SJordan Crouse msm_gem_put_vaddr(bo); 14147ad0e8cfSJordan Crouse msm_gem_unpin_iova(bo, aspace); 14151e29dff0SJordan Crouse 14161e29dff0SJordan Crouse if (locked) 1417eecd7fd8SEmil Velikov drm_gem_object_put_locked(bo); 14181e29dff0SJordan Crouse else 1419f7d33950SEmil Velikov drm_gem_object_put(bo); 14201e29dff0SJordan Crouse } 14210815d774SJordan Crouse 14220815d774SJordan Crouse void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...) 14230815d774SJordan Crouse { 14240815d774SJordan Crouse struct msm_gem_object *msm_obj = to_msm_bo(bo); 14250815d774SJordan Crouse va_list ap; 14260815d774SJordan Crouse 14270815d774SJordan Crouse if (!fmt) 14280815d774SJordan Crouse return; 14290815d774SJordan Crouse 14300815d774SJordan Crouse va_start(ap, fmt); 14310815d774SJordan Crouse vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap); 14320815d774SJordan Crouse va_end(ap); 14330815d774SJordan Crouse } 1434