/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0.
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)
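
/*
 * Worked example (plain arithmetic, not normative): with a 4K granule
 * and a 32-bit IAS, arm_lpae_alloc_pgtable() below computes levels =
 * DIV_ROUND_UP(32 - 12, 9) = 3, so the walk starts at level 1. A
 * 48-bit IAS needs all four levels and starts at level 0.
 */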

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
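
/*
 * e.g. with a 4K granule (pg_shift = 12, bits_per_level = 9) and four
 * levels, this gives the familiar AArch64 shifts: 39 for level 0, 30
 * for level 1, 21 for level 2 and 12 for level 3.
 */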

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
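
/*
 * With 8-byte PTEs and a 4K granule this works out to the usual LPAE
 * mapping sizes: 1 << (3 + 9) = 4K at level 3, 2M at level 2 and 1G
 * at level 1.
 */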

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
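
/*
 * Round-trip sanity check of the two accessors above (assuming a 4K
 * granule): pfn_to_iopte(0x12345, d) puts 0x12345000 in the output
 * address field, and iopte_to_pfn() recovers 0x12345, since both mask
 * against the 48-bit address range around the pg_shift shift.
 */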

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!selftest_running)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!selftest_running)
		dma_sync_single_for_device(cfg->iommu_dev,
					   __arm_lpae_dma_addr(ptep),
					   sizeof(pte), DMA_TO_DEVICE);
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep);

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	if (iopte_leaf(*ptep, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, cfg);
	return 0;
}
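
/*
 * By way of illustration (bit values from the definitions above), a
 * stage-1, read-only, cacheable page installed at the last level works
 * out to:
 *
 *	pte = ARM_LPAE_PTE_TYPE_PAGE | ARM_LPAE_PTE_AF |
 *	      ARM_LPAE_PTE_SH_IS | ARM_LPAE_PTE_nG |
 *	      ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_AP_RDONLY |
 *	      (ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT) |
 *	      pfn_to_iopte(paddr >> data->pg_shift, data);
 */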

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
					       GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
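
/*
 * For instance, mapping 2M at a 2M-aligned iova with a 4K granule and
 * a level-0 start recurses through levels 0 and 1 (allocating table
 * entries on demand) and installs a block entry at level 2, the first
 * level at which ARM_LPAE_BLOCK_SIZE() matches the requested size.
 */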

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
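
/*
 * Example: IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE on a stage-2 format
 * yields HAP_READ | HAP_WRITE | MEMATTR_OIWB, while the same flags at
 * stage 1 give AP_UNPRIV | nG plus the cacheable MAIR index (AP_RDONLY
 * is only set for read-only mappings).
 */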

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, cfg);
	iova &= ~(blk_size - 1);
	cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
	return size;
}
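
/*
 * Example: unmapping 4K from the middle of an existing 2M block (4K
 * granule, block at level 2) lands in the function above. The loop
 * re-maps the other 511 4K chunks of the block at level 3 through
 * __arm_lpae_map(), then the old block entry is replaced with the new
 * table descriptor and the stale block TLB entry is flushed.
 */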

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &data->iop.cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data),
					   false, cookie);
			tlb->tlb_sync(cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	/* Mask with the leaf size: a block entry covers more than a granule */
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}
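
/*
 * e.g. a lookup that hits a 2M block entry at level 2 (4K granule)
 * returns the block base from the PTE plus the low 21 bits of the
 * iova, while a hit on a last-level 4K page keeps just the low 12
 * bits.
 */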

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
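
/*
 * Example: on a host with PAGE_SIZE == SZ_4K, a caller advertising
 * pgsize_bitmap = SZ_4K | SZ_64K | SZ_2M | SZ_1G selects the 4K
 * granule and is trimmed to SZ_4K | SZ_2M | SZ_1G; the 64K leaf size
 * is dropped because a 4K translation regime cannot describe it.
 */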

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
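
/*
 * Putting the numbers together: ias = 32 with a 4K granule gives
 * pg_shift = 12, bits_per_level = 9, va_bits = 20 and levels = 3, so
 * pgd_bits = 20 - 18 = 2 and pgd_size = 1 << (2 + 3) = 32 bytes: four
 * level-1 entries, each covering 1G.
 */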

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}
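
	/*
	 * Example: ias = 40 with a 4K granule would otherwise need a
	 * 4-level walk rooted in a 16-byte pgd (two level-0 entries).
	 * The block above instead concatenates two full level-1 tables
	 * (pgd_size = 8K) and drops to a 3-level walk, as the stage-2
	 * concatenation rules permit.
	 */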

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
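	/*
	 * e.g. a 3-level, 4K-granule walk starts at level 1: sl was
	 * bumped to 2 above, and (~2 & 0x3) = 1 is the SL0 encoding for
	 * a level-1 start with 4K pages.
	 */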
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif