// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_TCR_TG0_4K		0
#define ARM_LPAE_TCR_TG0_64K		1
#define ARM_LPAE_TCR_TG0_16K		2

#define ARM_LPAE_TCR_TG1_16K		1
#define ARM_LPAE_TCR_TG1_4K		2
#define ARM_LPAE_TCR_TG1_64K		3

#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
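
/*
 * Example of the packing above: with a 64K granule, a 52-bit output
 * address has bits 51:48 folded down into PTE bits 15:12, which are
 * otherwise RES0 thanks to the 64K alignment of the addresses being
 * mapped. iopte_to_paddr() undoes this by rotating those bits back up
 * and masking with ARM_LPAE_PTE_ADDR_MASK << 4 (i.e. bits 51:16). For
 * granules smaller than 64K the output address is capped at 48 bits,
 * so the PTE address field can be used as-is.
 */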

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
			WARN_ON(1);
			return -EINVAL;
		}
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
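
/*
 * Concurrency example for the above: if two CPUs race to install a table
 * in the same empty slot, exactly one cmpxchg64_relaxed() succeeds. The
 * loser sees old != curr, frees its own table (in the caller) and walks
 * the winner's instead. On a non-coherent walk the loser also syncs the
 * winner's PTE out of the cache ("kick it"), while the winner rewrites
 * the entry with ARM_LPAE_PTE_SW_SYNC set, so that later walkers know no
 * further sync is needed.
 */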

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size)
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}
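
/*
 * Walk-through of the recursion above, for a 2M mapping with a 4K granule
 * and a level-0 start: at levels 0 and 1 the 2M size doesn't match the
 * 512G/1G block sizes, so table entries are installed (allocating
 * next-level tables on demand); at level 2 the size matches
 * ARM_LPAE_BLOCK_SIZE(2,d) == 2M and a block leaf is written.
 */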

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	if (prot & IOMMU_CACHE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}
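
/*
 * Example for a stage-1 format: IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE
 * yields nG | AP_UNPRIV (no IOMMU_PRIV) | ATTRINDX == IDX_CACHE | SH_IS |
 * AF; AP_RDONLY is left clear because the mapping is writeable, and XN
 * is left clear because IOMMU_NOEXEC wasn't requested.
 */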

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
		return size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
}
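
/*
 * Example of the above: unmapping one 4K page from a 2M block (4K
 * granule) allocates a level-3 table, fills its other 511 slots with
 * page entries inheriting the block's attributes, skips the slot being
 * unmapped, and cmpxchg()s the new table over the old block PTE. If
 * another CPU replaced the block first, we retry the unmap against
 * whichever table won.
 */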

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl, iop->fmt)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_flush_walk(iop, iova, size,
						  ARM_LPAE_GRANULE(data));
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_page(iop, gather, iova, size);
		}

		return size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
}
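
/*
 * To summarise the three cases above with a 4K-granule example:
 * unmapping a 4K page descends through the level 0-2 tables, then at
 * level 3 the size matches the block size, so the PTE is cleared and the
 * page queued for TLB invalidation; unmapping 2M covered by a level-2
 * block clears the block PTE directly; unmapping 4K out of that same 2M
 * block instead takes the split path above.
 */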

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}
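
/*
 * E.g. if the walk above lands on a 2M block at level 2 describing
 * physical address P, the result for an IOVA inside that block is
 * P | (iova & (SZ_2M - 1)).
 */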

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
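
/*
 * Example: on a 4K-page host, a caller offering 4K | 2M | 1G keeps all
 * three sizes (granule 4K, matching PAGE_SIZE). A caller offering only
 * 64K | 512M would select the 64K granule instead, and with oas > 48
 * would additionally gain the 4TB block size that the 52-bit extension
 * permits.
 */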

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
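	/*
	 * Example geometry: ias == 48 with a 4K granule gives pg_shift == 12,
	 * bits_per_level == 9, va_bits == 36, hence a full 4-level walk
	 * (start_level == 0) and pgd_bits == 36 - 27 == 9, i.e. a 4K pgd.
	 * With ias == 32 the walk shrinks to 3 levels (start_level == 1) and
	 * the pgd needs only 2 bits' worth of entries (32 bytes).
	 */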

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NON_STRICT |
			    IO_PGTABLE_QUIRK_ARM_TTBR1))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		tcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}
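
	/*
	 * Example: ias == 40 with a 4K granule would need a 4-level walk
	 * with a 2-entry level-0 table (pgd_bits == 1). Since 2 <=
	 * ARM_LPAE_S2_MAX_CONCAT_PAGES, we instead allocate two
	 * concatenated level-1 tables (pgd_bits == 10, an 8K pgd) and
	 * start the walk at level 1.
	 */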

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
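	/*
	 * SL0 is encoded inversely to the start level, hence the negation:
	 * e.g. for a 4K granule (where sl was incremented above), a walk
	 * starting at level 1 has sl == 2 and so programs SL0 == 1.
	 */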

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_flush_leaf	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE, GFP_KERNEL))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif