// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_mmu.h"

/*
 * Final kref release for an address space: tear down the drm_mm range
 * allocator, destroy the backing MMU (if any), drop the owning pid
 * reference and free the struct.  Called only via kref_put().
 */
static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	put_pid(aspace->pid);
	kfree(aspace);
}


/* Drop a reference on the address space; NULL is tolerated as a no-op. */
void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

/*
 * Take a reference on the address space and return it.  NULL and
 * ERR_PTR() values are passed through untouched so callers can use
 * this on the result of a lookup without checking first.
 */
struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
	if (!IS_ERR_OR_NULL(aspace))
		kref_get(&aspace->kref);

	return aspace;
}

/*
 * Check whether the vma is still in use: either explicitly pinned
 * (inuse > 0) or covered by a not-yet-signaled scheduler fence.
 * Completed fences are retired from fence_mask as a side effect,
 * so later calls get cheaper.  All state is checked under vma->lock.
 */
bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
{
	bool ret = true;

	spin_lock(&vma->lock);

	if (vma->inuse > 0)
		goto out;

	while (vma->fence_mask) {
		unsigned idx = ffs(vma->fence_mask) - 1;

		if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))
			goto out;

		/* This fence has signaled, forget about it: */
		vma->fence_mask &= ~BIT(idx);
	}

	ret = false;

out:
	spin_unlock(&vma->lock);

	return ret;
}

/* Actually unmap memory for the vma */
void msm_gem_vma_purge(struct msm_gem_vma *vma)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	unsigned size = vma->node.size;

	/* Print a message if we try to purge a vma in use */
	GEM_WARN_ON(msm_gem_vma_inuse(vma));

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/*
 * Drop one pin reference.  Caller must hold vma->lock (both callers
 * below take it around this helper).  Warns instead of underflowing
 * if the vma was not actually pinned, or has no iova.
 */
static void vma_unpin_locked(struct msm_gem_vma *vma)
{
	if (GEM_WARN_ON(!vma->inuse))
		return;
	if (!GEM_WARN_ON(!vma->iova))
		vma->inuse--;
}

/* Remove reference counts for the mapping */
void msm_gem_vma_unpin(struct msm_gem_vma *vma)
{
	spin_lock(&vma->lock);
	vma_unpin_locked(vma);
	spin_unlock(&vma->lock);
}

/* Replace pin reference with fence: */
void msm_gem_vma_unpin_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
{
	spin_lock(&vma->lock);
	/*
	 * Record the last fence on this context before dropping the pin,
	 * so msm_gem_vma_inuse() keeps reporting the vma busy until the
	 * GPU work that uses it has completed.
	 */
	vma->fctx[fctx->index] = fctx;
	vma->fence[fctx->index] = fctx->last_fence;
	vma->fence_mask |= BIT(fctx->index);
	vma_unpin_locked(vma);
	spin_unlock(&vma->lock);
}

/* Map and pin vma: */
int
msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	int ret;

	if (GEM_WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	spin_lock(&vma->lock);
	vma->inuse++;
	spin_unlock(&vma->lock);

	if (vma->mapped)
		return 0;

	/* Set mapped before the (unlocked) mmu map below, rolled back on error. */
	vma->mapped = true;

	/* An aspace-less vma (e.g. contiguous carveout) needs no pagetable entry. */
	if (!aspace)
		return 0;

	/*
	 * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
	 * a lock across map/unmap which is also used in the job_run()
	 * path, as this can cause deadlock in job_run() vs shrinker/
	 * reclaim.
	 *
	 * Revisit this if we can come up with a scheme to pre-alloc pages
	 * for the pgtable in map/unmap ops.
	 */
	ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot);

	if (ret) {
		/* Undo both the mapped flag and the pin taken above: */
		vma->mapped = false;
		spin_lock(&vma->lock);
		vma->inuse--;
		spin_unlock(&vma->lock);
	}

	return ret;
}

/* Close an iova.  Warn if it is still in use */
void msm_gem_vma_close(struct msm_gem_vma *vma)
{
	struct msm_gem_address_space *aspace = vma->aspace;

	GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);

	/* Return the iova range to the drm_mm allocator: */
	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	/* Drop the aspace reference taken in msm_gem_vma_init(): */
	msm_gem_address_space_put(aspace);
}

/*
 * Allocate a new vma bound to @aspace.  The iova is not allocated
 * until msm_gem_vma_init().  Returns NULL on allocation failure.
 */
struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return NULL;

	spin_lock_init(&vma->lock);
	vma->aspace = aspace;

	return vma;
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	int ret;

	if (GEM_WARN_ON(!aspace))
		return -EINVAL;

	/* An already-allocated iova means this vma was initialized before: */
	if (GEM_WARN_ON(vma->iova))
		return -EBUSY;

	/* Carve a page-aligned range out of the aspace's drm_mm: */
	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
					  size, PAGE_SIZE, 0,
					  range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start;
	vma->mapped = false;

	/* The vma holds a reference on the aspace until msm_gem_vma_close(): */
	kref_get(&aspace->kref);

	return 0;
}

/*
 * Create an address space spanning [va_start, va_start + size), backed
 * by @mmu.  An ERR_PTR() mmu is passed through so callers can chain
 * error handling; returns ERR_PTR(-ENOMEM) on allocation failure.
 * The returned aspace starts with one reference.
 */
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	struct msm_gem_address_space *aspace;

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;
	aspace->va_start = va_start;
	aspace->va_size = size;

	drm_mm_init(&aspace->mm, va_start, size);

	kref_init(&aspace->kref);

	return aspace;
}