/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0.
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
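
/*
 * For example, with a 4KB granule (pg_shift == 12, bits_per_level == 9)
 * and a 3-level table (start level 1), this works out as:
 *
 *   ARM_LPAE_LVL_SHIFT(3,d) == 12	(level 3 maps 4KB pages)
 *   ARM_LPAE_LVL_SHIFT(2,d) == 21	(level 2 maps 2MB blocks)
 *   ARM_LPAE_LVL_SHIFT(1,d) == 30	(level 1 maps 1GB blocks)
 */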

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
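
/*
 * Continuing the 4KB-granule example above (with a single-page pgd, so
 * ARM_LPAE_PGD_IDX() contributes nothing), each level extracts 9 bits:
 *
 *   ARM_LPAE_LVL_IDX(0x40201000, 1, d) == 1	(bits 38:30)
 *   ARM_LPAE_LVL_IDX(0x40201000, 2, d) == 1	(bits 29:21)
 *   ARM_LPAE_LVL_IDX(0x40201000, 3, d) == 1	(bits 20:12)
 */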

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
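
/*
 * Since pg_shift == ilog2(sizeof(arm_lpae_iopte)) + bits_per_level, this
 * equals 1ULL << ARM_LPAE_LVL_SHIFT(l,d): with a 4KB granule that is 4KB
 * at level 3, 2MB at level 2 and 1GB at level 1.
 */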

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
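
/*
 * Note that a final-level page shares its type encoding (0b11) with a
 * table descriptor at any other level, which is why iopte_leaf() must be
 * told the level: everywhere else, only type 0b01 (block) is a leaf.
 */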

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (data->pg_shift < 16)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
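
/*
 * Illustration: with a 64KB granule (pg_shift == 16), PA bits 15:12 of
 * any page-aligned address are zero, so paddr_to_iopte() parks PA bits
 * 51:48 there (a shift of 48 - 12 == 36) and iopte_to_paddr() undoes it.
 * E.g. a paddr of (1ULL << 51) | 0x10000 round-trips via the PTE address
 * field value 0x18000.
 */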

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
	    (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
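
/*
 * If two callers race to install a table at the same ptep, the cmpxchg
 * above lets exactly one of them win; the loser sees a non-zero 'old',
 * frees its own table and continues the walk through the winner's (see
 * __arm_lpae_map() below). ARM_LPAE_PTE_SW_SYNC records that the new PTE
 * has definitely been made visible to the table walker, so later
 * observers can skip the sync.
 */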

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
		   !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;

		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
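
/*
 * As a worked example, a stage-1 mapping requested with IOMMU_READ |
 * IOMMU_WRITE | IOMMU_CACHE yields nG | AP_UNPRIV with the cacheable
 * MAIR index; the AF, shareability and descriptor-type bits are added
 * later by __arm_lpae_init_pte().
 */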

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	}

	if (unmap_idx < 0)
		return __arm_lpae_unmap(data, iova, size, lvl, tablep);

	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
	return size;
}
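
/*
 * For instance, unmapping one 4KB page from the middle of a 2MB block
 * (4KB granule) arrives here with lvl == 3 and split_sz == SZ_4K: a new
 * level-3 table is filled with 511 page entries inheriting the block's
 * attributes, the entry covering 'iova' is left empty, and the table is
 * then atomically swapped in over the old block PTE.
 */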

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
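
/*
 * For example, an IOMMU advertising 4KB, 64KB, 2MB and 1GB on a host
 * with 4KB CPU pages selects the 4KB granule, leaving pgsize_bitmap as
 * SZ_4K | SZ_2M | SZ_1G and the address limits capped at 48 bits.
 */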

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
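
	/*
	 * Sanity example: ias == 48 with a 4KB granule gives pg_shift == 12,
	 * bits_per_level == 9, va_bits == 36 and hence a 4-level walk, with
	 * pgd_bits == 9 leaving a single 4KB page for the pgd.
	 */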

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;
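
	/*
	 * For a typical 48-bit IAS this works out to T0SZ == 16; together
	 * with the granule, shareability and IPS fields above, that is
	 * everything the IOMMU needs to describe the stage-1 walk.
	 */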

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;
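
	/*
	 * With the index assignments above this packs to a MAIR0 value of
	 * 0x0004ff44: Normal Non-Cacheable in slot 0, Normal Write-Back
	 * Read/Write-Allocate in slot 1 and Device-nGnRE in slot 2.
	 */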

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}
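
	/*
	 * Example: a 40-bit IAS with a 4KB granule needs 4 levels on paper
	 * (va_bits == 28), but the level-0 table would hold only two
	 * entries; concatenating two level-1 tables into an 8KB pgd drops
	 * the walk to 3 levels instead.
	 */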

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;
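
	/*
	 * Continuing the example above: a 3-level, 4KB-granule walk starts
	 * at level 1, which the 4K-specific increment and the negation
	 * above encode as SL0 == 1 ("begin the stage-2 walk at level 1").
	 */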

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.quirks = IO_PGTABLE_QUIRK_NO_DMA,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif