// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-sva.h"

static DEFINE_MUTEX(iommu_sva_lock);
static DECLARE_IOASID_SET(iommu_sva_pasid);

/**
 * iommu_sva_alloc_pasid - Allocate a PASID for the mm
 * @mm: the mm
 * @min: minimum PASID value (inclusive)
 * @max: maximum PASID value (inclusive)
 *
 * Try to allocate a PASID for this mm, or take a reference to the existing one
 * provided it fits within the [@min, @max] range. On success the PASID is
 * stored in mm->pasid and remains valid for the lifetime of the mm.
 *
 * Returns 0 on success and < 0 on error.
 */
int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
{
	int ret = 0;
	ioasid_t pasid;

	if (min == INVALID_IOASID || max == INVALID_IOASID ||
	    min == 0 || max < min)
		return -EINVAL;

	mutex_lock(&iommu_sva_lock);
	/* Is a PASID already associated with this mm? */
	if (pasid_valid(mm->pasid)) {
		/* @max is inclusive, so only PASIDs strictly above it overflow. */
		if (mm->pasid < min || mm->pasid > max)
			ret = -EOVERFLOW;
		goto out;
	}

	pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
	if (!pasid_valid(pasid))
		ret = -ENOMEM;
	else
		mm_pasid_set(mm, pasid);
out:
	mutex_unlock(&iommu_sva_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);

/* ioasid_find getter() requires a void * argument */
static bool __mmget_not_zero(void *mm)
{
	return mmget_not_zero(mm);
}

/**
 * iommu_sva_find() - Find mm associated to the given PASID
 * @pasid: Process Address Space ID assigned to the mm
 *
 * On success a reference to the mm is taken, and must be released with mmput().
 *
 * Returns the mm corresponding to this PASID, or an error if not found.
 */
struct mm_struct *iommu_sva_find(ioasid_t pasid)
{
	return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero);
}
EXPORT_SYMBOL_GPL(iommu_sva_find);

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. Caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	ioasid_t max_pasids;
	int ret;

	max_pasids = dev->iommu->max_pasids;
	if (!max_pasids)
		return ERR_PTR(-EOPNOTSUPP);

	/* Allocate mm->pasid if necessary. */
	ret = iommu_sva_alloc_pasid(mm, 1, max_pasids - 1);
	if (ret)
		return ERR_PTR(ret);

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&iommu_sva_lock);
	/* Search for an existing domain. */
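	/*
	 * SVA domains are per-mm and shared by every bond to the same mm:
	 * domain->users counts the bonds, so only the first bond pays for
	 * the domain allocation and PASID attach below.
	 */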
	domain = iommu_get_domain_for_dev_pasid(dev, mm->pasid,
						IOMMU_DOMAIN_SVA);
	if (IS_ERR(domain)) {
		ret = PTR_ERR(domain);
		goto out_unlock;
	}

	if (domain) {
		domain->users++;
		goto out;
	}

	/* Allocate a new domain and attach it to the device PASID. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (!domain) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = iommu_attach_device_pasid(domain, dev, mm->pasid);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
out:
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	handle->domain = domain;

	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put a reference to a bond between device and address space. The device
 * should not be issuing any more transactions for this PASID. All outstanding
 * page requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;
	ioasid_t pasid = domain->mm->pasid;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	/* The last bond to the mm detaches the PASID and frees the domain. */
	if (--domain->users == 0) {
		iommu_detach_device_pasid(domain, dev, pasid);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

/**
 * iommu_sva_get_pasid() - Get the PASID backing a bond
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Returns the PASID the device must use when accessing the bound mm.
 */
u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;

	return domain->mm->pasid;
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);

/*
 * I/O page fault handler for SVA
 */
enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	struct mm_struct *mm = data;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	/* The mm may be exiting; only handle the fault if it is still live. */
	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = find_extend_vma(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	/* Translate the page request permissions into VM and fault flags. */
	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}
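
/*
 * Example usage (illustrative sketch, not code shipped by this file): how a
 * device driver might bind the current process for SVA. The surrounding
 * driver structure, @dev, and the device programming step are assumptions;
 * the iommu_* calls are the real API exported here and in linux/iommu.h.
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *	int ret;
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	if (ret)
 *		return ret;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle)) {
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *		return PTR_ERR(handle);
 *	}
 *
 *	pasid = iommu_sva_get_pasid(handle);
 *	... program @pasid into the device and issue DMA tagged with it ...
 *
 *	iommu_sva_unbind_device(handle);
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 */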