/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)	((d)->pgd_size >> (d)->pg_shift)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((a) >> ARM_LPAE_LVL_SHIFT(l,d)) &				\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))

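/*
 * For example, with a 4K granule and four levels (pg_shift = 12,
 * bits_per_level = 9), ARM_LPAE_LVL_SHIFT() evaluates to 39/30/21/12 for
 * levels 0-3 and ARM_LPAE_BLOCK_SIZE() to 512G/1G/2M/4K: each level indexes
 * nine bits of the IOVA and the final level maps individual 4K pages.
 */
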
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~((1ULL << (d)->pg_shift) - 1)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;

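/*
 * Install a leaf (page or block) entry at the given level. An existing leaf
 * entry must be unmapped first; overwriting one returns -EEXIST, a path only
 * the selftest is expected to hit.
 */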
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	/* We require an unmap first */
	if (iopte_leaf(*ptep, lvl)) {
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	*ptep = pte;
	data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
	return 0;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	void *cookie = data->iop.cookie;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = alloc_pages_exact(1UL << data->pg_shift,
					 GFP_ATOMIC | __GFP_ZERO);
		if (!cptep)
			return -ENOMEM;

		data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
						 cookie);
		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		*ptep = pte;
		data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

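/*
 * Translate generic IOMMU_{READ,WRITE,CACHE,NOEXEC} protection flags into the
 * corresponding PTE bits: stage-1 formats use the AP and AttrIndx fields,
 * stage-2 formats use HAP and MemAttr.
 */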
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
}

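/*
 * Recursively free every table reachable from ptep, then ptep's own table.
 * Leaf entries are simply skipped along the way.
 */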
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		return;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = 1UL << data->pg_shift;

	start = ptep;
	end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	free_pages_exact(start, table_size);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

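/*
 * Unmapping a region smaller than an existing block mapping requires
 * splitting the block: a table is installed in its place which re-creates
 * the old mapping at a finer granularity, minus the portion being unmapped.
 * Returns the number of bytes unmapped, or 0 on failure.
 */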
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	void *cookie = data->iop.cookie;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	*ptep = table;
	tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	iova &= ~(blk_size - 1);
	tlb->tlb_add_flush(iova, blk_size, true, cookie);
	return size;
}

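/*
 * Recursive worker for arm_lpae_unmap(): walk down to the level whose
 * block/page size matches the requested size, clear the entry (splitting a
 * covering block if necessary) and return the number of bytes unmapped.
 */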
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		*ptep = 0;
		tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, false, cookie);
			tlb->tlb_sync(data->iop.cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= ((1 << data->pg_shift) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}

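/*
 * For example, a 32-bit IAS with 4K pages gives pg_shift = 12,
 * bits_per_level = 9 and va_bits = 20, so levels = DIV_ROUND_UP(20, 9) = 3
 * and the pgd needs pgd_bits = 20 - 2 * 9 = 2 index bits: four entries of
 * 1GB each, i.e. pgd_size = 32 bytes before any stage-2 concatenation.
 */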
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

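/*
 * Allocate a stage-1 (long-descriptor) table and report the TCR, MAIR and
 * TTBR values the caller should program into the hardware through
 * cfg->arm_lpae_s1_cfg. Only TTBR0/MAIR0 are populated; TTBR1 and MAIR1 are
 * set to zero.
 */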
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

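/*
 * For example, a 40-bit stage-2 IAS with 4K pages would otherwise need four
 * levels; concatenating the initial tables yields a three-level walk with
 * pgd_pages = 2, i.e. an 8K pgd of 1024 level-1 entries covering 1GB each.
 */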
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

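/*
 * These init functions are not called directly; a driver is expected to go
 * through alloc_io_pgtable_ops() with the matching format, roughly as in the
 * selftest below. A minimal sketch (my_tlb_ops, cookie, iova and paddr are
 * driver-specific placeholders):
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 32,
 *		.oas		= 40,
 *		.tlb		= &my_tlb_ops,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops) {
 *		ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *		...
 *		ops->unmap(ops, iova, SZ_4K);
 *		free_io_pgtable_ops(ops);
 *	}
 */
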
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
	.flush_pgtable	= dummy_flush_pgtable,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif