/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0.
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
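
/*
 * Worked example (illustrative only, not used by the code): with a 4K
 * granule and a 48-bit IAS, pg_shift = 12, bits_per_level = 9 and
 * levels = 4, so the walk starts at level 0 and ARM_LPAE_LVL_SHIFT()
 * yields 39, 30, 21 and 12 for levels 0-3 respectively.
 */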

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
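
/*
 * Continuing the 4K-granule example above: ARM_LPAE_LVL_IDX() extracts
 * 9 bits per level (plus any extra PGD bits at the start level), and
 * ARM_LPAE_BLOCK_SIZE() evaluates to 1GB, 2MB and 4KB at levels 1, 2
 * and 3, i.e. ilog2(sizeof(arm_lpae_iopte)) = 3 bits plus 9 bits for
 * each remaining level. Figures are illustrative.
 */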

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
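
/*
 * Round-trip sketch (values illustrative, assuming a 4K granule): for
 * pfn 0x12345, pfn_to_iopte(0x12345, d) places 0x12345000 in the
 * output-address field of the PTE, and iopte_to_pfn() recovers 0x12345
 * by masking off the attribute bits above ARM_LPAE_MAX_ADDR_BITS and
 * shifting out the low attribute bits.
 */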

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}
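
/*
 * Note: this relies on the IOMMU walker consuming raw physical
 * addresses, i.e. dma_addr_t == phys_addr_t for these allocations;
 * __arm_lpae_alloc_pages() below verifies that assumption against the
 * DMA API at allocation time.
 */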

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating it, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		__arm_lpae_sync_pte(ptep, cfg);
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
	    (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

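/*
 * Illustrative race (a sketch, not additional logic): two CPUs may map
 * within the same unpopulated region concurrently. Each allocates a
 * candidate table; the cmpxchg in arm_lpae_install_table() lets exactly
 * one win, and the loser sees the winner's PTE as the return value,
 * frees its own table and walks into the winner's instead (see
 * __arm_lpae_map() below).
 */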
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
		   !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
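
/*
 * Example walk (assuming the 4K-granule geometry sketched earlier):
 * mapping a 2MB block recurses through levels 0 and 1, allocating or
 * dereferencing intermediate tables, and installs the leaf entry at
 * level 2, where ARM_LPAE_BLOCK_SIZE() matches the requested size.
 */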

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;

		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
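
/*
 * For instance (example only), a stage-1 mapping with IOMMU_READ |
 * IOMMU_CACHE and no IOMMU_WRITE yields nG | AP_RDONLY | AP_UNPRIV
 * plus the cacheable MAIR index; the same flags at stage 2 yield
 * HAP_READ | MEMATTR_OIWB. The AF, SH and type bits are added later
 * by __arm_lpae_init_pte().
 */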

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte blk_pte, int lvl,
				    arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_pfn(blk_pte, data) << data->pg_shift;
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	}

	if (unmap_idx < 0)
		return __arm_lpae_unmap(data, iova, size, lvl, tablep);

	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
	return size;
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		io_pgtable_tlb_sync(&data->iop);

	return unmapped;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte, data) << data->pg_shift) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
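
/*
 * Worked example (values illustrative): if the caller offers
 * 4K | 64K | 2M | 512M and the CPU's PAGE_SIZE is 4K, the 4K granule
 * wins and the bitmap is cut down to 4K | 2M, since 64K and 512M only
 * exist under the 64K granule.
 */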

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
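
	/*
	 * e.g. (illustrative) a 4K granule with 48-bit IAS: pg_shift = 12,
	 * bits_per_level = 9, va_bits = 36 -> levels = 4, and the pgd
	 * covers the remaining 9 bits -> pgd_size = 4K, a single page.
	 */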

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}
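
	/*
	 * Example (illustrative): a 4K granule with a 40-bit IAS gives a
	 * 4-level walk with a 2-entry pgd; 2 pages <= 16, so the pgd
	 * grows to two concatenated pages and the walk starts at level 1.
	 */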

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
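	/*
	 * SL0 encodes the start level with an inverted encoding (as
	 * adjusted above for the 4K granule), hence the bitwise NOT.
	 */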
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.quirks = IO_PGTABLE_QUIRK_NO_DMA,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif