// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
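
/*
 * Worked example (informative, not used by the code): with a 4KB granule,
 * arm_lpae_iopte is 8 bytes, so bits_per_level = 12 - ilog2(8) = 9 and
 * ARM_LPAE_GRANULE() = 8 << 9 = 4KB. The shifts then come out as:
 *
 *	ARM_LPAE_LVL_SHIFT(3) = (4 - 3) * 9 + 3 = 12	-> 4KB pages
 *	ARM_LPAE_LVL_SHIFT(2) = (4 - 2) * 9 + 3 = 21	-> 2MB blocks
 *	ARM_LPAE_LVL_SHIFT(1) = (4 - 1) * 9 + 3 = 30	-> 1GB blocks
 *
 * and ARM_LPAE_LVL_IDX() simply extracts the 9 index bits (or pgd_bits
 * worth, at the start level) sitting above the relevant shift.
 */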
65 
66 /* Page table bits */
67 #define ARM_LPAE_PTE_TYPE_SHIFT		0
68 #define ARM_LPAE_PTE_TYPE_MASK		0x3
69 
70 #define ARM_LPAE_PTE_TYPE_BLOCK		1
71 #define ARM_LPAE_PTE_TYPE_TABLE		3
72 #define ARM_LPAE_PTE_TYPE_PAGE		3
73 
74 #define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)
75 
76 #define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
77 #define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
78 #define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
79 #define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
80 #define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
81 #define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
82 #define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
83 #define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)
84 
85 #define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
86 /* Ignore the contiguous bit for block splitting */
87 #define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
88 #define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
89 					 ARM_LPAE_PTE_ATTR_HI_MASK)
90 /* Software bit for solving coherency races */
91 #define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)
92 
93 /* Stage-1 PTE */
94 #define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
95 #define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
96 #define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
97 #define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
98 
99 /* Stage-2 PTE */
100 #define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
101 #define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
102 #define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
103 #define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
104 #define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
105 #define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
106 
107 /* Register bits */
108 #define ARM_LPAE_VTCR_SL0_MASK		0x3
109 
110 #define ARM_LPAE_TCR_T0SZ_SHIFT		0
111 
112 #define ARM_LPAE_VTCR_PS_SHIFT		16
113 #define ARM_LPAE_VTCR_PS_MASK		0x7
114 
115 #define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
116 #define ARM_LPAE_MAIR_ATTR_MASK		0xff
117 #define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
118 #define ARM_LPAE_MAIR_ATTR_NC		0x44
119 #define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
120 #define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
121 #define ARM_LPAE_MAIR_ATTR_IDX_NC	0
122 #define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
123 #define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
124 #define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3
125 
126 #define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
127 #define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
128 #define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)
129 
130 #define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
131 #define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
132 
133 /* IOPTE accessors */
134 #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
135 
136 #define iopte_type(pte)					\
137 	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
138 
139 #define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)
140 
141 struct arm_lpae_io_pgtable {
142 	struct io_pgtable	iop;
143 
144 	int			pgd_bits;
145 	int			start_level;
146 	int			bits_per_level;
147 
148 	void			*pgd;
149 };
150 
151 typedef u64 arm_lpae_iopte;
152 
153 static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
154 			      enum io_pgtable_fmt fmt)
155 {
156 	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
157 		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;
158 
159 	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
160 }
161 
162 static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
163 				     struct arm_lpae_io_pgtable *data)
164 {
165 	arm_lpae_iopte pte = paddr;
166 
167 	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
168 	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
169 }
170 
171 static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
172 				  struct arm_lpae_io_pgtable *data)
173 {
174 	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
175 
176 	if (ARM_LPAE_GRANULE(data) < SZ_64K)
177 		return paddr;
178 
179 	/* Rotate the packed high-order bits back to the top */
180 	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
181 }
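
/*
 * Informative example of the packing above: with a 64KB granule and 52-bit
 * output addresses, descriptor bits 15:12 hold PA bits 51:48. For
 * paddr = 0x000f_0000_0001_0000, paddr_to_iopte() folds bits 51:48 (0xf)
 * down by 36 places, giving an address field of 0x0000_0000_0001_f000;
 * iopte_to_paddr() rotates them back up and masks off the overshoot. For
 * granules below 64KB, bits 15:12 are genuine low address bits and the
 * value is returned unmodified.
 */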
static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
	*ptep = 0;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, 1, cfg);
}
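
/*
 * Note for readers: when the IOMMU cannot snoop the CPU caches
 * (!cfg->coherent_walk), every PTE update follows the pattern above --
 * write the entry, then dma_sync it out to the point of coherency before
 * the walker may observe it. The ARM_LPAE_PTE_SW_SYNC software bit records
 * that a given table entry has already been synced, so concurrent mappers
 * (see arm_lpae_install_table() below) can avoid redundant cache
 * maintenance.
 */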
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
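
/*
 * Race walkthrough (informative): if two CPUs try to install a table into
 * the same empty slot, both allocate a table and race on the cmpxchg. The
 * loser sees old != curr, frees its own table back in __arm_lpae_map(),
 * and walks into the winner's table instead. On a non-coherent walk the
 * loser may also observe the winner's PTE before the winner has cleaned
 * it, so whoever gets here first syncs the entry, and the winner then
 * republishes it with ARM_LPAE_PTE_SW_SYNC set to stop later mappers
 * repeating the maintenance.
 */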

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret && mapped)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}
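
/*
 * Example of the recursion above (informative): with a 4KB granule and
 * start_level 0, mapping one 2MB page walks levels 0 and 1 (allocating
 * next-level tables as needed, since 2MB matches neither the 512GB nor
 * the 1GB block size), then finds size == block_size at level 2 and
 * installs a single block PTE there; level 3 is never visited.
 */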

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}
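
/*
 * Example (informative): for ARM_64_LPAE_S1 and
 * prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, this returns
 * nG | AP_UNPRIV | (ATTR_IDX_CACHE << 2) | SH_IS | AF -- a writeable,
 * unprivileged, inner-shareable, write-back cacheable entry with the
 * access flag preset so that no hardware access faults are taken.
 */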

static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
	return arm_lpae_map_pages(ops, iova, paddr, size, 1, iommu_prot, gfp,
				  NULL);
}
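
/*
 * Caller-side sketch (informative; the values are made up): a caller maps
 * a run of sixteen contiguous 4KB pages with something like
 *
 *	size_t mapped = 0;
 *	int ret = ops->map_pages(ops, iova, paddr, SZ_4K, 16,
 *				 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL,
 *				 &mapped);
 *
 * A single call may legitimately map fewer than pgcount entries (e.g.
 * when the run crosses a table boundary), so the caller advances
 * iova/paddr by *mapped and retries until everything is mapped.
 */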

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep, size_t pgcount)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
	int i, unmap_idx_start = -1, num_entries = 0, max_entries;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz) {
		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
		max_entries = ptes_per_table - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
	}

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx_start >= 0) {
		for (i = 0; i < num_entries; i++)
			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

		return num_entries * size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}
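
/*
 * Split example (informative): unmapping one 4KB page from the middle of
 * an existing 2MB block with a 4KB granule arrives here with lvl == 3 and
 * split_sz == SZ_4K. A fresh level-3 table is populated with 511 page
 * PTEs replicating the block's attributes, the slot being unmapped is
 * left empty, and the table is then atomically swapped in over the old
 * block PTE via arm_lpae_install_table().
 */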

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);

		while (i < num_entries) {
			pte = READ_ONCE(*ptep);
			if (WARN_ON(!pte))
				break;

			__arm_lpae_clear_pte(ptep, &iop->cfg);

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
			}

			ptep++;
			i++;
		}

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep, pgcount);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	return arm_lpae_unmap_pages(ops, iova, size, 1, gather);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
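
/*
 * Example (informative): on a host with 4KB CPU pages, a driver offering
 * pgsize_bitmap = SZ_4K | SZ_64K | SZ_2M | SZ_1G picks the 4KB granule
 * (it matches PAGE_SIZE) and the bitmap is trimmed to SZ_4K | SZ_2M |
 * SZ_1G; SZ_64K is dropped because it belongs to a different translation
 * granule. Only the 64KB granule raises the address limits to 52 bits.
 */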

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.map_pages	= arm_lpae_map_pages,
		.unmap		= arm_lpae_unmap,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
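
/*
 * Worked example (informative): for ias = 48 with a 4KB granule,
 * pg_shift = 12, bits_per_level = 9 and va_bits = 36, so
 * levels = DIV_ROUND_UP(36, 9) = 4 and start_level = 0. The pgd then
 * resolves pgd_bits = 36 - 9 * 3 = 9 bits, i.e. a full 4KB table of 512
 * entries. For ias = 39 the same arithmetic gives a three-level walk
 * starting at level 1.
 */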

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}
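
	/*
	 * Concatenation example (informative): for ias = 40 with a 4KB
	 * granule, arm_lpae_alloc_pgtable() yields a 4-level walk whose
	 * level-0 table holds just two entries. Each level-0 entry covers
	 * exactly one level-1 table page, so the two level-1 tables are
	 * instead allocated back-to-back (pgd_bits 1 -> 10, an 8KB pgd)
	 * and the walk starts directly at level 1.
	 */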

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
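
	/*
	 * SL0 encoding note (informative): for the 16KB/64KB granules the
	 * complement-and-mask above computes SL0 = 3 - start_level (e.g.
	 * start_level 1 -> SL0 = 0b10). The 4KB granule encodes
	 * 2 - start_level instead, hence the sl++ in the granule switch
	 * (e.g. start_level 1 -> sl = 2 -> SL0 = 0b01).
	 */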

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};
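
/*
 * Usage sketch (informative; the cfg values are illustrative): an IOMMU
 * driver reaches the allocators above through the generic io-pgtable
 * entry points rather than calling them directly, e.g.:
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.coherent_walk	= true,
 *		.tlb		= &driver_flush_ops,
 *		.iommu_dev	= dev,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *
 * On success the driver programs TTBR/TCR/MAIR from cfg.arm_lpae_s1_cfg
 * and tears everything down later with free_io_pgtable_ops(ops). The
 * selftests below follow exactly this pattern with dummy TLB ops.
 */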

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE, GFP_KERNEL))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct device dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
		.iommu_dev = &dev,
	};

	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
	set_dev_node(&dev, NUMA_NO_NODE);

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif