/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0.
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
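
/*
 * Worked example (4KB granule, 48-bit IAS): pg_shift = 12 and
 * bits_per_level = 12 - ilog2(sizeof(arm_lpae_iopte)) = 9, so
 * va_bits = 36, levels = 4 and the walk starts at level 0.
 * ARM_LPAE_LVL_SHIFT then gives 39/30/21/12 for levels 0-3, and
 * ARM_LPAE_BLOCK_SIZE gives 512GB/1GB/2MB/4KB, matching the
 * architectural 4KB translation regime.
 */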

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (data->pg_shift < 16)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
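
/*
 * Example of the packing above: with a 64KB granule and a 52-bit OAS,
 * physical address bits 51:48 are carried in PTE bits 15:12, which a
 * 64KB-aligned address never uses. paddr_to_iopte() folds them down by
 * 36 bits; iopte_to_paddr() folds them back up and masks with
 * ARM_LPAE_PTE_ADDR_MASK << 4 (i.e. bits 51:16) to recover the address.
 */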

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

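/*
 * Unless IO_PGTABLE_QUIRK_NO_DMA guarantees a coherent walker, every
 * PTE update must be pushed out of the CPU caches with a streaming DMA
 * sync before the IOMMU can be relied upon to observe it.
 */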
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;
	pte |= ARM_LPAE_PTE_SH_IS;
	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

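/*
 * Installing a new table can race with a concurrent map on another
 * CPU: whoever loses the cmpxchg below frees its candidate table and
 * walks the winner's instead. The ARM_LPAE_PTE_SW_SYNC software bit
 * records that a PTE has already been made visible to a non-coherent
 * walker, so a loser can tell whether the sync still needs doing.
 */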
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
	    (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

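/*
 * Recursive map: each call consumes one level of lookup. For example,
 * with a 4KB granule, a 2MB mapping recurses from the start level down
 * to lvl == 2, where the size matches the level-2 block size and a
 * block PTE is installed instead of descending any further.
 */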
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
		   !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

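/*
 * Block-splitting example: unmapping 4KB from the middle of a 2MB
 * block (4KB granule) swaps the block PTE for a level-3 table of 512
 * entries, 511 of which re-create the original mapping page by page
 * while the entry covering the target IOVA is left empty.
 */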
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
		io_pgtable_tlb_sync(&data->iop);
		return size;
	}

	return __arm_lpae_unmap(data, iova, size, lvl, tablep);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl, iop->fmt)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}

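/*
 * Software walk mirroring the hardware one: index each level in turn
 * until a leaf is reached, then combine the leaf's output address with
 * the low bits of the IOVA. For a 2MB block at level 2 (4KB granule),
 * the result is the block base plus iova & (SZ_2M - 1).
 */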
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

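	/*
	 * Example: pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G on a 4KB-page
	 * host picks granule = PAGE_SIZE, while SZ_64K | SZ_512M picks
	 * SZ_64K, the smallest size above the CPU page size.
	 */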
	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

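	/* T0SZ = 64 - IAS, e.g. a 48-bit IAS gives T0SZ = 16 */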
	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
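	/*
	 * SL0 is an inverted encoding of the start level (hence the
	 * ~sl): with a 4KB granule, SL0 = 2 starts the walk at level 0
	 * and SL0 = 0 at level 2; the sl++ above accounts for the 4KB
	 * granule using a different encoding from 16KB/64KB.
	 */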
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias != 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		u64 mair, ttbr;

		/* Copy values as union fields overlap */
		mair = cfg->arm_lpae_s1_cfg.mair[0];
		ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];

		cfg->arm_mali_lpae_cfg.memattr = mair;
		cfg->arm_mali_lpae_cfg.transtab = ttbr |
			ARM_MALI_LPAE_TTBR_READ_INNER |
			ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	}

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.quirks = IO_PGTABLE_QUIRK_NO_DMA,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif