// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
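 * (e.g. a three-level pagetable walks levels 1-3, a four-level one 0-3)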
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
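 * e.g. with a 4K granule (pg_shift == 12, bits_per_level == 9), levels
 * 0-3 shift by 39, 30, 21 and 12 bits respectively.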
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
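 * e.g. with a 4K granule, the level-3 index of IOVA 0x40201000 is
 * (0x40201000 >> 12) & 0x1ff == 1.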
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
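/* e.g. 4K granule: 1GiB blocks at level 1, 2MiB at level 2, 4KiB pages at level 3 */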

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
}

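/*
 * With the 64K granule, a 52-bit output address is supported by packing
 * physical address bits 51:48 into PTE bits 15:12, which are otherwise
 * RES0. The two helpers below fold and unfold that encoding.
 */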
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (data->pg_shift < 16)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;
	pte |= ARM_LPAE_PTE_SH_IS;
	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
			WARN_ON(1);
			return -EINVAL;
		}
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

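/*
 * Install a next-level table pointer with a relaxed cmpxchg so that
 * concurrent map() callers can race safely: the loser frees its table
 * and walks whichever entry actually got installed. On a non-coherent
 * walk, ARM_LPAE_PTE_SW_SYNC records that the new entry has been
 * cleaned to the point of coherency, so other CPUs need not re-sync it.
 */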
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

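/*
 * Convert IOMMU_* prot flags into PTE permission/attribute bits: stage 1
 * encodes permissions in the AP bits and the memory type as an index
 * into the MAIR, whereas stage 2 uses the HAP bits and a direct MEMATTR
 * encoding. Mali deliberately mixes the two (see the comment below).
 */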
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_QCOM_SYS_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

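/*
 * Unmap a region smaller than the block entry that covers it: replace
 * the block with a next-level table remapping the remainder, leaving a
 * hole for the part being unmapped. A race with a concurrent split is
 * resolved by arm_lpae_install_table() and retried against whichever
 * table actually got installed.
 */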
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
		return size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl, iop->fmt)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_flush_walk(iop, iova, size,
						  ARM_LPAE_GRANULE(data));
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_page(iop, gather, iova, size);
		}

		return size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
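	 * e.g. a bitmap of SZ_4K | SZ_64K | SZ_2M | SZ_512M on a 4K-page
	 * CPU picks the 4K granule (keeping SZ_4K | SZ_2M), whereas a
	 * 64K-page CPU picks 64K (keeping SZ_64K | SZ_512M).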
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
	} else {
		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
	}

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
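	 * e.g. a 4K granule with a 40-bit IAS would need a 4-level walk,
	 * but concatenating two level-1 tables into an 8KiB pgd reduces
	 * that to 3 levels starting at level 1.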
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
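	/*
	 * SL0 encodes the start level inversely (~sl & 3 == 3 - sl, so a
	 * smaller SL0 means a deeper start level); the 4K granule encoding
	 * is offset by one, hence the sl++ above.
	 */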
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias != 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		u64 mair, ttbr;

		/* Copy values as union fields overlap */
		mair = cfg->arm_lpae_s1_cfg.mair[0];
		ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];

		cfg->arm_mali_lpae_cfg.memattr = mair;
		cfg->arm_mali_lpae_cfg.transtab = ttbr |
			ARM_MALI_LPAE_TTBR_READ_INNER |
			ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	}

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};
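
/*
 * Illustrative only: a rough sketch of how an IOMMU driver might drive
 * these ops, modelled on the selftest below. The cfg values and the
 * my_tlb_ops/my_smmu names are hypothetical and not part of this file.
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.coherent_walk	= true,
 *		.tlb		= &my_tlb_ops,
 *		.iommu_dev	= my_smmu->dev,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, my_smmu);
 *	if (!ops)
 *		return -ENOMEM;
 *	ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	ops->unmap(ops, iova, SZ_4K, NULL);
 *	free_io_pgtable_ops(ops);
 */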

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
			    void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_flush_leaf	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif