/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0.
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for the pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
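
/*
 * Worked example (illustrative only, not used by the code): for a 4KB
 * granule with 48 address bits, pg_shift = 12 and bits_per_level = 9,
 * so levels = 4 and ARM_LPAE_START_LVL() is 0. ARM_LPAE_LVL_SHIFT()
 * then yields 39/30/21/12 for levels 0..3, ARM_LPAE_LVL_IDX() extracts
 * nine bits of the IOVA at each of those shifts, and
 * ARM_LPAE_BLOCK_SIZE() evaluates to 512GB/1GB/2MB/4KB respectively.
 */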

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
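
/*
 * Worked example (illustrative only): with the encodings above, the
 * stage-1 MAIR value assembled in arm_64_lpae_alloc_pgtable_s1() is
 * 0x44 | (0xff << 8) | (0x04 << 16) = 0x04ff44, i.e. attribute index 0
 * is Normal Non-cacheable, index 1 is Normal Write-Back RWA and index 2
 * is Device memory.
 */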

/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~((1ULL << (d)->pg_shift) - 1)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;
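
/*
 * Example configuration (illustrative only): a 4KB granule with a 48-bit
 * input address gives pg_shift = 12, bits_per_level = 9, levels = 4 and
 * pgd_size = SZ_4K, while a 64KB granule with a 42-bit input address gives
 * pg_shift = 16, bits_per_level = 13, levels = 2 and pgd_size = SZ_64K.
 */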

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
{
	return phys_to_dma(dev, virt_to_phys(pages));
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests it can't by
		 * giving us back some translation, that bodes very badly...
		 */
		if (dma != __arm_lpae_dma_addr(dev, pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;

	if (!selftest_running)
		dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;

	*ptep = pte;

	if (!selftest_running)
		dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
					   sizeof(pte), DMA_TO_DEVICE);
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep);

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	if (iopte_leaf(*ptep, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, cfg);
	return 0;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
					       GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
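
/*
 * Worked example (illustrative only): mapping a 2MB region with a 4KB
 * granule starts at level 0 and walks down, allocating table pages on
 * demand, until level 2, where block_size == SZ_2M matches the requested
 * size and arm_lpae_init_pte() installs a single block entry. A 4KB
 * request descends one level further and installs a page entry at level 3.
 */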

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
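
/*
 * Illustrative summary: at stage 1 a read-write, cacheable request yields
 * ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG with ATTRINDX selecting the
 * write-back MAIR slot, and dropping IOMMU_WRITE adds ARM_LPAE_PTE_AP_RDONLY.
 * At stage 2 the HAP bits encode read/write permission directly and MEMATTR
 * selects write-back or non-cacheable memory. The AF, shareability and type
 * bits are added later by arm_lpae_init_pte().
 */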

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		return;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = 1UL << data->pg_shift;

	start = ptep;
	end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, cfg);
	iova &= ~(blk_size - 1);
	cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
	return size;
}
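
/*
 * Worked example (illustrative only): unmapping 4KB from the middle of an
 * existing 2MB block entry (4KB granule) lands here. The loop above remaps
 * every 4KB chunk of the old block except the one being unmapped into a
 * freshly allocated level-3 table, then swaps that table in for the block
 * entry and flushes the block's IOVA range from the TLB.
 */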

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &data->iop.cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, false, cookie);
			tlb->tlb_sync(cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= ((1 << data->pg_shift) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
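
/*
 * Worked example (illustrative only): a driver advertising both 4KB- and
 * 64KB-based page sizes on a kernel with PAGE_SIZE == 4KB is restricted to
 * the 4KB granule here, so pgsize_bitmap collapses to a subset of
 * SZ_4K | SZ_2M | SZ_1G and the 64KB-based sizes are dropped.
 */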

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
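
/*
 * Worked arithmetic (illustrative only): for ias = 48 and a 4KB granule,
 * pg_shift = 12, bits_per_level = 9, va_bits = 36 and levels = 4, so
 * pgd_bits = 36 - 3 * 9 = 9 and pgd_size = 1 << (9 + 3) = SZ_4K, i.e. a
 * single page holding 512 level-0 entries.
 */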

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}
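
	/*
	 * Worked example (illustrative only): with ias = 40 and a 4KB
	 * granule, the generic calculation gives levels = 4 and a 16-byte
	 * pgd (two entries). Those two level-0 entries become two
	 * concatenated level-1 pages instead: pgd_size grows to 8KB and
	 * levels drops to 3, shortening every stage-2 walk by one step.
	 */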

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
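
/*
 * Usage sketch (illustrative only; see the selftest below for the real
 * calling convention): a caller fills in a struct io_pgtable_cfg with its
 * TLB ops, pgsize_bitmap, ias and oas, passes it to alloc_io_pgtable_ops()
 * with one of the ARM_*_LPAE_S1/S2 formats, then drives the returned
 * io_pgtable_ops, e.g.:
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	ops->unmap(ops, iova, SZ_4K);
 *	free_io_pgtable_ops(ops);
 *
 * The hardware register values (TCR/MAIR/TTBR or VTCR/VTTBR) are returned
 * via cfg for the IOMMU driver to program.
 */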

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif