// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include <linux/pm_runtime.h>
#include "msm_drv.h"
#include "msm_mmu.h"

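/*
 * An mmu object wrapping a whole iommu_domain.  The pagetables counter
 * tracks how many private pagetables (struct msm_iommu_pagetable) are
 * layered on top of this domain, so TTBR0 can be configured on the
 * first attach and disabled again on the last detach.
 */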
struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
	atomic_t pagetables;
};

#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

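/*
 * A private (per-context) set of GPU pagetables.  It has its own
 * io-pgtable instance walked via TTBR0, while the parent msm_iommu's
 * domain continues to service the kernel-managed (TTBR1) mappings.
 */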
struct msm_iommu_pagetable {
	struct msm_mmu base;
	struct msm_mmu *parent;
	struct io_pgtable_ops *pgtbl_ops;
	const struct iommu_flush_ops *tlb;
	struct device *iommu_dev;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	phys_addr_t ttbr;
	u32 asid;
};

static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
	return container_of(mmu, struct msm_iommu_pagetable, base);
}

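/*
 * Pick the largest supported page size that fits the size and the
 * iova/paddr alignment of the current chunk, and optionally report in
 * @count how many pages of that size can be (un)mapped before a larger
 * page size would become usable.
 */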
/* based on iommu_pgsize() in iommu.c: */
static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
			   unsigned long iova, phys_addr_t paddr,
			   size_t size, size_t *count)
{
	unsigned int pgsize_idx, pgsize_idx_next;
	unsigned long pgsizes;
	size_t offset, pgsize, pgsize_next;
	unsigned long addr_merge = paddr | iova;

	/* Page sizes supported by the hardware and small enough for @size */
	pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);

	/* Constrain the page sizes further based on the maximum alignment */
	if (likely(addr_merge))
		pgsizes &= GENMASK(__ffs(addr_merge), 0);

	/* Make sure we have at least one suitable page size */
	BUG_ON(!pgsizes);

	/* Pick the biggest page size remaining */
	pgsize_idx = __fls(pgsizes);
	pgsize = BIT(pgsize_idx);
	if (!count)
		return pgsize;

	/* Find the next biggest supported page size, if it exists */
	pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
	if (!pgsizes)
		goto out_set_count;

	pgsize_idx_next = __ffs(pgsizes);
	pgsize_next = BIT(pgsize_idx_next);

	/*
	 * There's no point trying a bigger page size unless the virtual
	 * and physical addresses are similarly offset within the larger page.
	 */
	if ((iova ^ paddr) & (pgsize_next - 1))
		goto out_set_count;

	/* Calculate the offset to the next page size alignment boundary */
	offset = pgsize_next - (addr_merge & (pgsize_next - 1));

	/*
	 * If size is big enough to accommodate the larger page, reduce
	 * the number of smaller pages.
	 */
	if (offset + pgsize_next <= size)
		size = offset;

out_set_count:
	*count = size >> pgsize_idx;
	return pgsize;
}

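/*
 * Unmap a range from a private pagetable in optimally-sized chunks.  No
 * iotlb_gather is passed to unmap_pages(); instead one full IOTLB flush
 * is done at the end (see also the empty msm_iommu_tlb_add_page()).
 */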
static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
		size_t size)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;

	while (size) {
		size_t unmapped, pgsize, count;

		pgsize = calc_pgsize(pagetable, iova, iova, size, &count);

		unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
		if (!unmapped)
			break;

		iova += unmapped;
		size -= unmapped;
	}

	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	return (size == 0) ? 0 : -EINVAL;
}

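/*
 * Map an sg_table into a private pagetable, chunk by chunk, using the
 * largest page size each chunk's size and alignment allow.  On failure,
 * everything mapped so far is unmapped before returning.
 */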
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	struct scatterlist *sg;
	u64 addr = iova;
	unsigned int i;

	for_each_sgtable_sg(sgt, sg, i) {
		size_t size = sg->length;
		phys_addr_t phys = sg_phys(sg);

		while (size) {
			size_t pgsize, count, mapped = 0;
			int ret;

			pgsize = calc_pgsize(pagetable, addr, phys, size, &count);

			ret = ops->map_pages(ops, addr, phys, pgsize, count,
					     prot, GFP_KERNEL, &mapped);

			/*
			 * map_pages could fail after mapping some of the
			 * pages, so update the counters before error
			 * handling.
			 */
			phys += mapped;
			addr += mapped;
			size -= mapped;

			if (ret) {
				msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
				return -EINVAL;
			}
		}
	}

	return 0;
}

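/*
 * Tear down a private pagetable, disabling TTBR0 in the arm-smmu driver
 * if this was the last one attached to the parent.
 */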
static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
	struct adreno_smmu_priv *adreno_smmu =
		dev_get_drvdata(pagetable->parent->dev);

	/*
	 * If this is the last attached pagetable for the parent,
	 * disable TTBR0 in the arm-smmu driver
	 */
	if (atomic_dec_return(&iommu->pagetables) == 0)
		adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

	free_io_pgtable_ops(pagetable->pgtbl_ops);
	kfree(pagetable);
}

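/*
 * Export the TTBR0 address and ASID of a private pagetable so the
 * caller can program a pagetable switch; returns -EINVAL for any mmu
 * that is not a MSM_MMU_IOMMU_PAGETABLE.
 */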
int msm_iommu_pagetable_params(struct msm_mmu *mmu,
		phys_addr_t *ttbr, int *asid)
{
	struct msm_iommu_pagetable *pagetable;

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (ttbr)
		*ttbr = pagetable->ttbr;

	if (asid)
		*asid = pagetable->asid;

	return 0;
}

struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	return &iommu->domain->geometry;
}

static const struct msm_mmu_funcs pagetable_funcs = {
	.map = msm_iommu_pagetable_map,
	.unmap = msm_iommu_pagetable_unmap,
	.destroy = msm_iommu_pagetable_destroy,
};

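/*
 * TLB maintenance for private pagetables is forwarded to the flush ops
 * saved from the TTBR1 cfg, but only while the SMMU is already powered:
 * pm_runtime_get_if_in_use() deliberately does not wake the device,
 * presumably because a suspended SMMU has no TLB state worth flushing.
 */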
static void msm_iommu_tlb_flush_all(void *cookie)
{
	struct msm_iommu_pagetable *pagetable = cookie;
	struct adreno_smmu_priv *adreno_smmu;

	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
		return;

	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);

	pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);

	pm_runtime_put_autosuspend(pagetable->iommu_dev);
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
		size_t granule, void *cookie)
{
	struct msm_iommu_pagetable *pagetable = cookie;
	struct adreno_smmu_priv *adreno_smmu;

	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
		return;

	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);

	pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);

	pm_runtime_put_autosuspend(pagetable->iommu_dev);
}

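/*
 * Intentionally a no-op: the unmap path passes a NULL iotlb_gather and
 * relies on a single full flush afterwards rather than gathering
 * per-page invalidations.
 */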
static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
		unsigned long iova, size_t granule, void *cookie)
{
}

static const struct iommu_flush_ops tlb_ops = {
	.tlb_flush_all = msm_iommu_tlb_flush_all,
	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
	.tlb_add_page = msm_iommu_tlb_add_page,
};

static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg);

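/*
 * Create a new private pagetable for a GPU context.  The TTBR1 cfg
 * exposed by the adreno-smmu interface is cloned and re-targeted at
 * TTBR0; creating the first pagetable is what triggers the arm-smmu
 * driver to actually enable TTBR0 on the domain.
 */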
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
	struct msm_iommu *iommu = to_msm_iommu(parent);
	struct msm_iommu_pagetable *pagetable;
	const struct io_pgtable_cfg *ttbr1_cfg = NULL;
	struct io_pgtable_cfg ttbr0_cfg;
	int ret;

	/* Get the pagetable configuration from the domain */
	if (adreno_smmu->cookie)
		ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);

	/*
	 * If you hit this WARN_ONCE() you are probably missing an entry in
	 * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
	 */
	if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables"))
		return ERR_PTR(-ENODEV);

	pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
	if (!pagetable)
		return ERR_PTR(-ENOMEM);

	msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
		MSM_MMU_IOMMU_PAGETABLE);

	/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
	ttbr0_cfg = *ttbr1_cfg;

	/* The incoming cfg will have the TTBR1 quirk enabled */
	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
	ttbr0_cfg.tlb = &tlb_ops;

	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
		&ttbr0_cfg, pagetable);

	if (!pagetable->pgtbl_ops) {
		kfree(pagetable);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If this is the first pagetable that we've allocated, send it back to
	 * the arm-smmu driver as a trigger to set up TTBR0
	 */
	if (atomic_inc_return(&iommu->pagetables) == 1) {
		ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
		if (ret) {
			free_io_pgtable_ops(pagetable->pgtbl_ops);
			kfree(pagetable);
			return ERR_PTR(ret);
		}
	}

	/* Needed later for TLB flush */
	pagetable->parent = parent;
	pagetable->tlb = ttbr1_cfg->tlb;
	pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
	pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;

	/*
	 * TODO we would like each set of page tables to have a unique ASID
	 * to optimize TLB invalidation.  But iommu_flush_iotlb_all() will
	 * end up flushing the ASID used for TTBR1 pagetables, which is not
	 * what we want.  So for now just use the same ASID as TTBR1.
	 */
	pagetable->asid = 0;

	return &pagetable->base;
}

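/*
 * IOMMU fault handler for GPU domains: collect adreno-specific fault
 * info when available, defer to the driver-installed handler if one is
 * set, otherwise log the fault and resume translation so the SMMU does
 * not remain stalled.
 */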
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;
	struct msm_mmu *mmu = &iommu->base;
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
	struct adreno_smmu_fault_info info, *ptr = NULL;

	if (adreno_smmu->get_fault_info) {
		adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);
		ptr = &info;
	}

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags, ptr);

	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);

	if (mmu->funcs->resume_translation)
		mmu->funcs->resume_translation(mmu);

	return 0;
}

static void msm_iommu_resume_translation(struct msm_mmu *mmu)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);

	if (adreno_smmu->resume_translation)
		adreno_smmu->resume_translation(adreno_smmu->cookie, true);
}

static void msm_iommu_detach(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_detach_device(iommu->domain, mmu->dev);
}

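/*
 * iovas with bit 48 set fall in the upper half of the address space
 * and, as with arm64 VAs generally, presumably must be sign extended
 * through bit 63; msm_iommu_map() and msm_iommu_unmap() both apply this
 * fixup before calling into the iommu layer.
 */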
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

	/* The arm-smmu driver expects the addresses to be sign extended */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	iommu_unmap(iommu->domain, iova, len);

	return 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
	.detach = msm_iommu_detach,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.destroy = msm_iommu_destroy,
	.resume_translation = msm_iommu_resume_translation,
};

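/*
 * Create an mmu object for a whole iommu domain.  Note the mixed return
 * convention: NULL when the device has no usable iommu, ERR_PTR() on
 * real failures, so callers should test with IS_ERR_OR_NULL().
 */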
struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
{
	struct iommu_domain *domain;
	struct msm_iommu *iommu;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return NULL;

	iommu_set_pgtable_quirks(domain, quirks);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu) {
		iommu_domain_free(domain);
		return ERR_PTR(-ENOMEM);
	}

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);

	atomic_set(&iommu->pagetables, 0);

	ret = iommu_attach_device(iommu->domain, dev);
	if (ret) {
		iommu_domain_free(domain);
		kfree(iommu);
		return ERR_PTR(ret);
	}

	return &iommu->base;
}

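/*
 * GPU flavor of msm_iommu_new(): additionally installs the fault
 * handler and enables stall-on-fault, so that fault state can be
 * captured before translation is resumed.
 */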
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
	struct msm_iommu *iommu;
	struct msm_mmu *mmu;

	mmu = msm_iommu_new(dev, quirks);
	if (IS_ERR_OR_NULL(mmu))
		return mmu;

	iommu = to_msm_iommu(mmu);
	iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);

	/* Enable stall on iommu fault: */
	if (adreno_smmu->set_stall)
		adreno_smmu->set_stall(adreno_smmu->cookie, true);

	return mmu;
}
456