xref: /openbmc/linux/drivers/gpu/drm/msm/msm_iommu.c (revision 70bccecfcaf6c506191aed487f3cf2695fc3008c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include "msm_drv.h"
#include "msm_mmu.h"

struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
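	/*
	 * Count of GPU pagetables currently layered on top of this domain;
	 * TTBR0 is programmed when the first one is created and torn down
	 * again when the last one is destroyed.
	 */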
	atomic_t pagetables;
};

#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

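/*
 * A GPU-private pagetable layered on top of the parent msm_iommu (TTBR1)
 * domain: it owns its own io-pgtable for TTBR0, so the GPU can use
 * per-context address spaces, while TLB flushes and faults are still
 * routed through the parent domain.
 */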
struct msm_iommu_pagetable {
	struct msm_mmu base;
	struct msm_mmu *parent;
	struct io_pgtable_ops *pgtbl_ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	phys_addr_t ttbr;
	u32 asid;
};

static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
	return container_of(mmu, struct msm_iommu_pagetable, base);
}

/* based on iommu_pgsize() in iommu.c: */
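/*
 * Pick the largest page size (and the count of pages of that size) usable
 * for the next chunk of a mapping, given the alignment of @iova/@paddr and
 * the remaining @size.
 *
 * For example, assuming a pgsize_bitmap of SZ_4K | SZ_2M | SZ_1G: mapping
 * 4MB at a 2MB-aligned iova/paddr yields pgsize = SZ_2M with *count = 2,
 * while an address that is only 4KB-aligned falls back to SZ_4K pages up
 * to the next 2MB boundary.
 */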
static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
			   unsigned long iova, phys_addr_t paddr,
			   size_t size, size_t *count)
{
	unsigned int pgsize_idx, pgsize_idx_next;
	unsigned long pgsizes;
	size_t offset, pgsize, pgsize_next;
	unsigned long addr_merge = paddr | iova;

	/* Page sizes supported by the hardware and small enough for @size */
	pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);

	/* Constrain the page sizes further based on the maximum alignment */
	if (likely(addr_merge))
		pgsizes &= GENMASK(__ffs(addr_merge), 0);

	/* Make sure we have at least one suitable page size */
	BUG_ON(!pgsizes);

	/* Pick the biggest page size remaining */
	pgsize_idx = __fls(pgsizes);
	pgsize = BIT(pgsize_idx);
	if (!count)
		return pgsize;

	/* Find the next biggest supported page size, if it exists */
	pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
	if (!pgsizes)
		goto out_set_count;

	pgsize_idx_next = __ffs(pgsizes);
	pgsize_next = BIT(pgsize_idx_next);

	/*
	 * There's no point trying a bigger page size unless the virtual
	 * and physical addresses are similarly offset within the larger page.
	 */
	if ((iova ^ paddr) & (pgsize_next - 1))
		goto out_set_count;

	/* Calculate the offset to the next page size alignment boundary */
	offset = pgsize_next - (addr_merge & (pgsize_next - 1));

	/*
	 * If size is big enough to accommodate the larger page, reduce
	 * the number of smaller pages.
	 */
	if (offset + pgsize_next <= size)
		size = offset;

out_set_count:
	*count = size >> pgsize_idx;
	return pgsize;
}

static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
		size_t size)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;

	while (size) {
		size_t unmapped, pgsize, count;

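		/*
		 * Only the address alignment matters for unmapping, so pass
		 * iova in place of the physical address.
		 */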
		pgsize = calc_pgsize(pagetable, iova, iova, size, &count);

		unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
		if (!unmapped)
			break;

		iova += unmapped;
		size -= unmapped;
	}

	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	return (size == 0) ? 0 : -EINVAL;
}

static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	struct scatterlist *sg;
	u64 addr = iova;
	unsigned int i;

	for_each_sgtable_sg(sgt, sg, i) {
		size_t size = sg->length;
		phys_addr_t phys = sg_phys(sg);

		while (size) {
			size_t pgsize, count, mapped = 0;
			int ret;

			pgsize = calc_pgsize(pagetable, addr, phys, size, &count);

			ret = ops->map_pages(ops, addr, phys, pgsize, count,
					     prot, GFP_KERNEL, &mapped);

			/*
			 * map_pages could fail after mapping some of the pages,
			 * so update the counters before error handling.
			 */
			phys += mapped;
			addr += mapped;
			size -= mapped;

			if (ret) {
				msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
	struct adreno_smmu_priv *adreno_smmu =
		dev_get_drvdata(pagetable->parent->dev);

	/*
	 * If this is the last attached pagetable for the parent,
	 * disable TTBR0 in the arm-smmu driver
	 */
	if (atomic_dec_return(&iommu->pagetables) == 0)
		adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

	free_io_pgtable_ops(pagetable->pgtbl_ops);
	kfree(pagetable);
}

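/*
 * Export the TTBR0 address and ASID of a GPU pagetable, e.g. so that the
 * GPU side of the driver can program a pagetable switch.  Only valid for
 * MSM_MMU_IOMMU_PAGETABLE mmus.
 */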
int msm_iommu_pagetable_params(struct msm_mmu *mmu,
		phys_addr_t *ttbr, int *asid)
{
	struct msm_iommu_pagetable *pagetable;

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (ttbr)
		*ttbr = pagetable->ttbr;

	if (asid)
		*asid = pagetable->asid;

	return 0;
}

static const struct msm_mmu_funcs pagetable_funcs = {
		.map = msm_iommu_pagetable_map,
		.unmap = msm_iommu_pagetable_unmap,
		.destroy = msm_iommu_pagetable_destroy,
};

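/*
 * The TLB maintenance hooks for the TTBR0 pagetables are deliberately
 * no-ops: rather than flushing individual entries through the io-pgtable
 * callbacks, the whole parent domain is flushed after unmapping (see the
 * iommu_flush_iotlb_all() call in msm_iommu_pagetable_unmap()).
 */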
static void msm_iommu_tlb_flush_all(void *cookie)
{
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
		size_t granule, void *cookie)
{
}

static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
		unsigned long iova, size_t granule, void *cookie)
{
}

static const struct iommu_flush_ops null_tlb_ops = {
	.tlb_flush_all = msm_iommu_tlb_flush_all,
	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
	.tlb_add_page = msm_iommu_tlb_add_page,
};

static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg);

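/*
 * Allocate a GPU-private TTBR0 pagetable on top of @parent, which is
 * expected to be the GPU's main (TTBR1) iommu.  Creating the first
 * pagetable also triggers the arm-smmu driver to set up TTBR0 and enable
 * stall-on-fault.
 */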
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
	struct msm_iommu *iommu = to_msm_iommu(parent);
	struct msm_iommu_pagetable *pagetable;
	const struct io_pgtable_cfg *ttbr1_cfg = NULL;
	struct io_pgtable_cfg ttbr0_cfg;
	int ret;

	/* Get the pagetable configuration from the domain */
	if (adreno_smmu->cookie)
		ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
	if (!ttbr1_cfg)
		return ERR_PTR(-ENODEV);

	/*
	 * Defer setting the fault handler until we have a valid adreno_smmu
	 * to avoid accidentally installing a GPU-specific fault handler for
	 * the display's iommu
	 */
	iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);

	pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
	if (!pagetable)
		return ERR_PTR(-ENOMEM);

	msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
		MSM_MMU_IOMMU_PAGETABLE);

	/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
	ttbr0_cfg = *ttbr1_cfg;

	/* The incoming cfg will have the TTBR1 quirk enabled */
	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
	ttbr0_cfg.tlb = &null_tlb_ops;

	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
		&ttbr0_cfg, iommu->domain);

	if (!pagetable->pgtbl_ops) {
		kfree(pagetable);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If this is the first pagetable that we've allocated, send it back to
	 * the arm-smmu driver as a trigger to set up TTBR0
	 */
	if (atomic_inc_return(&iommu->pagetables) == 1) {
		/* Enable stall on iommu fault: */
		adreno_smmu->set_stall(adreno_smmu->cookie, true);

		ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
		if (ret) {
			free_io_pgtable_ops(pagetable->pgtbl_ops);
			kfree(pagetable);
			return ERR_PTR(ret);
		}
	}

	/* Needed later for TLB flush */
	pagetable->parent = parent;
	pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;

	/*
	 * TODO we would like each set of page tables to have a unique ASID
	 * to optimize TLB invalidation.  But iommu_flush_iotlb_all() will
	 * end up flushing the ASID used for TTBR1 pagetables, which is not
	 * what we want.  So for now just use the same ASID as TTBR1.
	 */
	pagetable->asid = 0;

	return &pagetable->base;
}

static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
	struct adreno_smmu_fault_info info, *ptr = NULL;

	if (adreno_smmu->get_fault_info) {
		adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);
		ptr = &info;
	}

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags, ptr);

	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
	return 0;
}

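/*
 * Resume translation after a stalled iommu fault so the GPU can make
 * progress again; stall-on-fault is enabled in msm_iommu_pagetable_create().
 */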
static void msm_iommu_resume_translation(struct msm_mmu *mmu)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);

	adreno_smmu->resume_translation(adreno_smmu->cookie, true);
}

static void msm_iommu_detach(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_detach_device(iommu->domain, mmu->dev);
}

static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

	/* The arm-smmu driver expects the addresses to be sign extended */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

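	/* Sign extend the address, same as in msm_iommu_map() */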
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	iommu_unmap(iommu->domain, iova, len);

	return 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
		.detach = msm_iommu_detach,
		.map = msm_iommu_map,
		.unmap = msm_iommu_unmap,
		.destroy = msm_iommu_destroy,
		.resume_translation = msm_iommu_resume_translation,
};

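/*
 * Wrap an iommu_domain in a msm_mmu and attach it to @dev.  This is used
 * for the "real" iommu domains (e.g. display, and the GPU's TTBR1 domain);
 * GPU-private pagetables are layered on top via msm_iommu_pagetable_create().
 */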
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
	struct msm_iommu *iommu;
	int ret;

	if (!domain)
		return ERR_PTR(-ENODEV);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);

	atomic_set(&iommu->pagetables, 0);

	ret = iommu_attach_device(iommu->domain, dev);
	if (ret) {
		kfree(iommu);
		return ERR_PTR(ret);
	}

	return &iommu->base;
}