1757636edSLu Baolu // SPDX-License-Identifier: GPL-2.0
2757636edSLu Baolu /*
3757636edSLu Baolu * Helpers for IOMMU drivers implementing SVA
4757636edSLu Baolu */
523e5d9ecSKirill A. Shutemov #include <linux/mmu_context.h>
6757636edSLu Baolu #include <linux/mutex.h>
7757636edSLu Baolu #include <linux/sched/mm.h>
8757636edSLu Baolu #include <linux/iommu.h>
9757636edSLu Baolu
10757636edSLu Baolu #include "iommu-sva.h"
11757636edSLu Baolu
12757636edSLu Baolu static DEFINE_MUTEX(iommu_sva_lock);
13757636edSLu Baolu
144e14176aSJason Gunthorpe /* Allocate a PASID for the mm within range (inclusive) */
iommu_sva_alloc_pasid(struct mm_struct * mm,struct device * dev)15*2dcebc7dSJacob Pan static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
16757636edSLu Baolu {
17*2dcebc7dSJacob Pan ioasid_t pasid;
18757636edSLu Baolu int ret = 0;
19757636edSLu Baolu
2023e5d9ecSKirill A. Shutemov if (!arch_pgtable_dma_compat(mm))
2123e5d9ecSKirill A. Shutemov return -EBUSY;
2223e5d9ecSKirill A. Shutemov
23757636edSLu Baolu mutex_lock(&iommu_sva_lock);
24757636edSLu Baolu /* Is a PASID already associated with this mm? */
25400b9b93SKirill A. Shutemov if (mm_valid_pasid(mm)) {
26*2dcebc7dSJacob Pan if (mm->pasid >= dev->iommu->max_pasids)
27757636edSLu Baolu ret = -EOVERFLOW;
28757636edSLu Baolu goto out;
29757636edSLu Baolu }
30757636edSLu Baolu
31*2dcebc7dSJacob Pan pasid = iommu_alloc_global_pasid(dev);
32*2dcebc7dSJacob Pan if (pasid == IOMMU_PASID_INVALID) {
33*2dcebc7dSJacob Pan ret = -ENOSPC;
344e14176aSJason Gunthorpe goto out;
35*2dcebc7dSJacob Pan }
36*2dcebc7dSJacob Pan mm->pasid = pasid;
374e14176aSJason Gunthorpe ret = 0;
38757636edSLu Baolu out:
39757636edSLu Baolu mutex_unlock(&iommu_sva_lock);
40757636edSLu Baolu return ret;
41757636edSLu Baolu }
42757636edSLu Baolu
43757636edSLu Baolu /**
44757636edSLu Baolu * iommu_sva_bind_device() - Bind a process address space to a device
45757636edSLu Baolu * @dev: the device
46757636edSLu Baolu * @mm: the mm to bind, caller must hold a reference to mm_users
47757636edSLu Baolu *
48757636edSLu Baolu * Create a bond between device and address space, allowing the device to
49757636edSLu Baolu * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
50757636edSLu Baolu * bond already exists between @device and @mm, an additional internal
51757636edSLu Baolu * reference is taken. Caller must call iommu_sva_unbind_device()
52757636edSLu Baolu * to release each reference.
53757636edSLu Baolu *
54757636edSLu Baolu * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
55757636edSLu Baolu * initialize the required SVA features.
56757636edSLu Baolu *
57757636edSLu Baolu * On error, returns an ERR_PTR value.
58757636edSLu Baolu */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	/* Allocate mm->pasid if necessary. */
	ret = iommu_sva_alloc_pasid(mm, dev);
	if (ret)
		return ERR_PTR(ret);

	/* Allocate the handle before taking the lock; simplifies unwinding. */
	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&iommu_sva_lock);
	/* Search for an existing domain. */
	domain = iommu_get_domain_for_dev_pasid(dev, mm->pasid,
						IOMMU_DOMAIN_SVA);
	if (IS_ERR(domain)) {
		ret = PTR_ERR(domain);
		goto out_unlock;
	}

	if (domain) {
		/*
		 * This device/PASID already has an SVA domain attached; just
		 * take another reference on it (released in unbind).
		 */
		domain->users++;
		goto out;
	}

	/* Allocate a new domain and set it on device pasid. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (!domain) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = iommu_attach_device_pasid(domain, dev, mm->pasid);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
out:
	mutex_unlock(&iommu_sva_lock);
	/* Handle fields are written outside the lock; handle is not yet published. */
	handle->dev = dev;
	handle->domain = domain;

	return handle;

out_free_domain:
	/* Domain was never attached, so a plain free is sufficient here. */
	iommu_domain_free(domain);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
115757636edSLu Baolu
116757636edSLu Baolu /**
117757636edSLu Baolu * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
118757636edSLu Baolu * @handle: the handle returned by iommu_sva_bind_device()
119757636edSLu Baolu *
120757636edSLu Baolu * Put reference to a bond between device and address space. The device should
121757636edSLu Baolu * not be issuing any more transaction for this PASID. All outstanding page
122757636edSLu Baolu * requests for this PASID must have been flushed to the IOMMU.
123757636edSLu Baolu */
iommu_sva_unbind_device(struct iommu_sva * handle)124757636edSLu Baolu void iommu_sva_unbind_device(struct iommu_sva *handle)
125757636edSLu Baolu {
126757636edSLu Baolu struct iommu_domain *domain = handle->domain;
127757636edSLu Baolu ioasid_t pasid = domain->mm->pasid;
128757636edSLu Baolu struct device *dev = handle->dev;
129757636edSLu Baolu
130757636edSLu Baolu mutex_lock(&iommu_sva_lock);
131757636edSLu Baolu if (--domain->users == 0) {
132757636edSLu Baolu iommu_detach_device_pasid(domain, dev, pasid);
133757636edSLu Baolu iommu_domain_free(domain);
134757636edSLu Baolu }
135757636edSLu Baolu mutex_unlock(&iommu_sva_lock);
136757636edSLu Baolu kfree(handle);
137757636edSLu Baolu }
138757636edSLu Baolu EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
139757636edSLu Baolu
iommu_sva_get_pasid(struct iommu_sva * handle)140757636edSLu Baolu u32 iommu_sva_get_pasid(struct iommu_sva *handle)
141757636edSLu Baolu {
142757636edSLu Baolu struct iommu_domain *domain = handle->domain;
143757636edSLu Baolu
144757636edSLu Baolu return domain->mm->pasid;
145757636edSLu Baolu }
146757636edSLu Baolu EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
147757636edSLu Baolu
148757636edSLu Baolu /*
149757636edSLu Baolu * I/O page fault handler for SVA
150757636edSLu Baolu */
enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	struct mm_struct *mm = data;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	/* Only PASID-tagged page requests can be resolved against an mm. */
	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	/* The mm may already be exiting; bail out rather than fault on it. */
	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	/* Translate the IOMMU fault permissions into mm access/fault flags. */
	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	/* Non-privileged device accesses fault like user-mode accesses. */
	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}
205cd389115SJacob Pan
mm_pasid_drop(struct mm_struct * mm)206cd389115SJacob Pan void mm_pasid_drop(struct mm_struct *mm)
207cd389115SJacob Pan {
20858390c8cSLinus Torvalds if (likely(!mm_valid_pasid(mm)))
2094e14176aSJason Gunthorpe return;
2104e14176aSJason Gunthorpe
211*2dcebc7dSJacob Pan iommu_free_global_pasid(mm->pasid);
212cd389115SJacob Pan }
213