// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"

/* Teardown, called via kref_put() once the last reference is dropped */
static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	put_pid(aspace->pid);
	kfree(aspace);
}

void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
	if (!IS_ERR_OR_NULL(aspace))
		kref_get(&aspace->kref);

	return aspace;
}

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	unsigned size = vma->node.size << PAGE_SHIFT;

	/* Print a message if we try to purge a vma in use */
	if (WARN_ON(vma->inuse > 0))
		return;

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	if (aspace->mmu)
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (!WARN_ON(!vma->iova))
		vma->inuse--;
}

int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int npages)
{
	unsigned size = npages << PAGE_SHIFT;
	int ret = 0;

	if (WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	vma->inuse++;

	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (aspace && aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, prot);

	if (ret)
		vma->mapped = false;

	return ret;
}
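/*
 * Illustrative sketch only, not part of the driver: the intended
 * lifecycle of a vma, using the helpers above the way a caller in
 * msm_gem.c would.  The "example_" name is hypothetical; the prot
 * flags and the 0..U64_MAX range mirror what the driver's pin path
 * passes for an unconstrained allocation.
 */
static int __maybe_unused example_vma_lifecycle(
		struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma,
		struct sg_table *sgt, int npages)
{
	int ret;

	/* Reserve an iova range; this takes a ref on the address space */
	ret = msm_gem_init_vma(aspace, vma, npages, 0, U64_MAX);
	if (ret)
		return ret;

	/* Map the backing pages and bump the in-use count */
	ret = msm_gem_map_vma(aspace, vma, IOMMU_READ | IOMMU_WRITE,
			sgt, npages);
	if (ret)
		return ret;

	/* ... GPU work references the buffer at vma->iova ... */

	/* Drop the in-use count; the iommu mapping itself stays cached */
	msm_gem_unmap_vma(aspace, vma);

	/* Actually tear down the iommu mapping once no longer in use */
	msm_gem_purge_vma(aspace, vma);

	/* Release the iova node and the address-space reference */
	msm_gem_close_vma(aspace, vma);

	return 0;
}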
/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (WARN_ON(vma->inuse > 0 || vma->mapped))
		return;

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages,
		u64 range_start, u64 range_end)
{
	int ret;

	if (WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
			0, range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;
	vma->mapped = false;

	kref_get(&aspace->kref);

	return 0;
}

/* Create an address space managing iovas in [va_start, va_start + size) */
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	struct msm_gem_address_space *aspace;

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;

	drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}
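/*
 * Illustrative sketch only, not part of the driver: typical creation
 * and teardown of an address space.  The mmu is assumed to come from a
 * backend constructor such as msm_iommu_new(); the name and the
 * SZ_16M/SZ_4G va range are placeholder values, not ones the driver
 * mandates.
 */
static struct msm_gem_address_space * __maybe_unused
example_aspace_setup(struct msm_mmu *mmu)
{
	struct msm_gem_address_space *aspace;

	/* An ERR_PTR-valued mmu is propagated unchanged by _create() */
	aspace = msm_gem_address_space_create(mmu, "example",
			SZ_16M, SZ_4G - SZ_16M);
	if (IS_ERR(aspace))
		return aspace;

	/* Each additional user takes its own reference... */
	msm_gem_address_space_get(aspace);

	/* ...and drops it when done; the final put frees the aspace */
	msm_gem_address_space_put(aspace);

	return aspace;
}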