/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;

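/*
 * Page tables are mapped with dma_map_single() and we check below that the
 * returned DMA address is identity-mapped, so the DMA address of a table is
 * simply its physical address.
 */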
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

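/*
 * Allocate a zeroed block of memory for a table (or the pgd) and map it
 * DMA_TO_DEVICE so that non-coherent IOMMU walkers can see our updates.
 * Outside of the selftests we insist that the DMA address matches the
 * physical address, since the walker is handed raw physical pointers.
 */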
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!selftest_running)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}

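/*
 * Install a PTE and, unless we're running the selftests (where there is no
 * real device), sync the updated entry out to the IOMMU walker.
 */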
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!selftest_running)
		dma_sync_single_for_device(cfg->iommu_dev,
					   __arm_lpae_dma_addr(ptep),
					   sizeof(pte), DMA_TO_DEVICE);
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep);

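/*
 * Write a leaf (page or block) entry at level lvl. Overwriting a live leaf
 * requires an unmap first; overwriting a table entry means unmapping and
 * freeing the old sub-table before the block entry can be installed.
 */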
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	if (iopte_leaf(*ptep, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, cfg);
	return 0;
}

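/*
 * Recursively walk down from level lvl, allocating intermediate tables as
 * necessary, until we reach the level whose block/page size matches size
 * and can take a leaf entry.
 */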
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
					       GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

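/*
 * Translate generic IOMMU_* prot flags into the stage-1 (AP/ATTRINDX) or
 * stage-2 (HAP/MEMATTR) permission and memory-attribute bits.
 */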
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

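/* Top-level map entry point exposed through io_pgtable_ops. */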
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

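/*
 * Recursively free a table at level lvl along with any next-level tables it
 * points to. Leaf entries are simply discarded.
 */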
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

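/*
 * Unmapping a region smaller than the block mapping that covers it means
 * replacing the block entry with a next-level table which re-maps everything
 * except the part being unmapped.
 */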
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, cfg);
	iova &= ~(blk_size - 1);
	cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
	return size;
}

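/*
 * Recursive unmap: clear the entry once size matches the block size at the
 * current level, splitting a block mapping if the request is smaller than it.
 */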
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &data->iop.cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data),
					   false, cookie);
			tlb->tlb_sync(cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}

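/*
 * Software table walk: resolve iova to a physical address, returning 0 if no
 * valid leaf entry is found.
 */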
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_GRANULE(data) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}

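/*
 * Common allocation path: derive the granule, number of levels and pgd size
 * from the configured page sizes and input address size, then hook up the
 * map/unmap/iova_to_phys ops.
 */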
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

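/*
 * AArch64 stage-1 allocator: fill in the TCR, MAIR and TTBR values the IOMMU
 * driver should program, then allocate the pgd itself.
 */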
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

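/*
 * AArch64 stage-2 allocator: as above, but builds a VTCR/VTTBR and may
 * concatenate pgds at the start level to save one level of walk.
 */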
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

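/*
 * Exercise map/unmap/iova_to_phys for both 64-bit formats against a single
 * config, using the dummy TLB ops above in place of a real IOMMU driver.
 */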
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif