// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))
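
/*
 * For example, with a 4KB granule an arm_lpae_iopte is 8 bytes, so
 * bits_per_level is 9 and the shifts work out as 12, 21, 30 and 39 for
 * levels 3 down to 0, matching the 4KB page and 2MB/1GB block sizes.
 */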

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
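
/*
 * e.g. with a 4KB granule, ARM_LPAE_LVL_IDX() at level 3 selects VA bits
 * 20:12, indexing one of the 512 entries of a leaf table.
 */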

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47, 12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
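
/*
 * e.g. with a 64KB granule and a 52-bit OAS, PA bits 51:48 are stashed in
 * PTE bits 15:12 (which are RES0 for 64KB-aligned addresses) by
 * paddr_to_iopte(), and rotated back up by iopte_to_paddr() above.
 */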

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
	*ptep = 0;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, 1, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}
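
/*
 * e.g. a stage-1 IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE request comes out
 * as nG | AP_UNPRIV | ATTRINDX=CACHE | SH_IS | AF; the type bits are added
 * later by __arm_lpae_init_pte().
 */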

static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return -EINVAL;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep, size_t pgcount)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
	int i, unmap_idx_start = -1, num_entries = 0, max_entries;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz) {
		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
		max_entries = ptes_per_table - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
	}

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx_start >= 0) {
		for (i = 0; i < num_entries; i++)
			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

		return num_entries * size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);

		while (i < num_entries) {
			pte = READ_ONCE(*ptep);
			if (WARN_ON(!pte))
				break;

			__arm_lpae_clear_pte(ptep, &iop->cfg);

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
			}

			ptep++;
			i++;
		}

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep, pgcount);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
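
/*
 * e.g. a caller advertising SZ_4K | SZ_64K | SZ_2M on a 4K-page CPU picks
 * the 4KB granule and is trimmed to that regime: SZ_4K | SZ_2M (plus
 * SZ_1G, had the caller offered it).
 */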

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages	= arm_lpae_map_pages,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
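
/*
 * e.g. ias = 48 with a 4KB granule gives pg_shift = 12, bits_per_level = 9,
 * va_bits = 36, levels = 4, start_level = 0 and pgd_bits = 9: a 512-entry,
 * 4KB pgd.
 */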

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}
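
	/*
	 * e.g. ias = 40 with a 4KB granule would otherwise start a 4-level
	 * walk from a 2-entry pgd; concatenation turns that into a 3-level
	 * walk rooted at a 1024-entry level-1 table instead.
	 */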

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};
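
/*
 * A minimal usage sketch (not part of this driver): an IOMMU driver reaches
 * these init_fns through alloc_io_pgtable_ops(), e.g.
 *
 *	struct io_pgtable_ops *ops;
 *	size_t mapped = 0;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops) {
 *		ops->map_pages(ops, iova, paddr, SZ_4K, 1,
 *			       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
 *		...
 *		free_io_pgtable_ops(ops);
 *	}
 *
 * where cfg, cookie, iova and paddr are the caller's own; the selftests
 * below are a complete in-tree example.
 */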

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size, mapped;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_READ | IOMMU_WRITE |
					   IOMMU_NOEXEC | IOMMU_CACHE,
					   GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map_pages(ops, iova, iova + size, size, 1,
					    IOMMU_READ | IOMMU_NOEXEC,
					    GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
				   IOMMU_READ, GFP_KERNEL, &mapped))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_WRITE, GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct device dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
		.iommu_dev = &dev,
	};

	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
	set_dev_node(&dev, NUMA_NO_NODE);

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif