// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))

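/*
 * Worked example (illustrative, not used by the code): with a 4K granule,
 * sizeof(arm_lpae_iopte) == 8, so bits_per_level = 12 - 3 = 9 and each
 * table holds 512 entries. ARM_LPAE_LVL_SHIFT then yields 39/30/21/12 for
 * levels 0-3, matching the ARM64 VMSA lookup levels, and
 * ARM_LPAE_BLOCK_SIZE gives the familiar 512G/1G/2M/4K mapping sizes at
 * those levels.
 */
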
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}

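/*
 * Illustrative note on the 52-bit address packing below: with a 64K granule,
 * output addresses above 48 bits are supported, but PTE bits 51:48 are not
 * available for the address, so OA bits 51:48 are stored in PTE bits 15:12
 * (which are RES0 anyway for a 64K-aligned address). For example, a
 * 64K-aligned paddr with bits 51:48 == 0xA is stored with 0xA in PTE bits
 * 15:12, and iopte_to_paddr() rotates it back up. For 4K/16K granules the
 * OA is capped at 48 bits, so the overlapping bits are zero and the OR in
 * paddr_to_iopte() is harmless.
 */
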
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

/*
 * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into
 * a concatenated PGD, into the maximum number of entries that can be
 * mapped in the same table page.
 */
static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
{
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);

	return ptes_per_table - (i & (ptes_per_table - 1));
}

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

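/*
 * Table memory handling (summary, derived from the code below): when the
 * walker is not cache-coherent (!cfg->coherent_walk), each table page is
 * mapped DMA_TO_DEVICE once at allocation time, and every PTE update is
 * pushed out with dma_sync_single_for_device() before the hardware may walk
 * it. The mapping is expected to be 1:1 with the physical address; anything
 * else (bounce buffering, IOVA translation) is rejected outright.
 */
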
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
	*ptep = 0;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, 1, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

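/*
 * Concurrent table installation (overview of the function below): a new
 * next-level table is published with a relaxed cmpxchg64() after a dma_wmb()
 * has made the table contents visible. On a non-coherent walk, the winner of
 * a race is also responsible for flushing the new PTE by cacheline; the
 * ARM_LPAE_PTE_SW_SYNC software bit records that this flush has happened, so
 * that losers (and later updaters) know whether they still need to sync.
 */
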
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = arm_lpae_max_entries(map_idx_start, data);
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}

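/*
 * Example of the descent above (illustrative): mapping a single 4K page at
 * the bottom of an empty 4-level, 4K-granule table means __arm_lpae_map()
 * recurses from level 0 to level 3, allocating and installing a next-level
 * table at levels 0, 1 and 2, then writing the leaf PTE at level 3. Mapping
 * a 2M region instead terminates at level 2, where size == block_size.
 */
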
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}

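/*
 * Worked example of the translation above (illustrative): for
 * ARM_64_LPAE_S1 without quirks, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE
 * becomes
 * nG | AP_UNPRIV | (MAIR_ATTR_IDX_CACHE << ATTRINDX_SHIFT) | SH_IS | AF,
 * i.e. a non-global, unprivileged, read-write, write-back cacheable,
 * inner-shareable entry with the access flag preset to avoid faults.
 */
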
static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return -EINVAL;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

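/*
 * Block splitting (overview of the function below, with an illustrative
 * example): unmapping a 4K page from the middle of an existing 2M block
 * mapping allocates a next-level table, fills it with 4K leaf entries that
 * re-map everything except the range being unmapped (inheriting the block's
 * attributes), and then atomically swaps the block PTE for a table PTE.
 * If the cmpxchg loses against a concurrent splitter, the freshly built
 * table is discarded and the unmap retries against the winner's table.
 */
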
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep, size_t pgcount)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
	int i, unmap_idx_start = -1, num_entries = 0, max_entries;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz) {
		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
		max_entries = arm_lpae_max_entries(unmap_idx_start, data);
		num_entries = min_t(int, pgcount, max_entries);
	}

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx_start >= 0) {
		for (i = 0; i < num_entries; i++)
			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

		return num_entries * size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = arm_lpae_max_entries(unmap_idx_start, data);
		num_entries = min_t(int, pgcount, max_entries);

		while (i < num_entries) {
			pte = READ_ONCE(*ptep);
			if (WARN_ON(!pte))
				break;

			__arm_lpae_clear_pte(ptep, &iop->cfg);

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
			}

			ptep++;
			i++;
		}

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep, pgcount);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

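/*
 * Example walk (illustrative): with a 4K granule and start_level 0, looking
 * up an IOVA means indexing 9 bits of the address at each level (bits 47:39,
 * 38:30, 29:21, 20:12). Hitting a leaf at level 2 returns the 2M-aligned
 * physical address from the PTE OR'd with the low 21 bits of the IOVA,
 * which is exactly what the masking at found_translation below computes.
 */
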
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages	= arm_lpae_map_pages,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

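/*
 * Geometry example (illustrative): for ias = 48 and a 4K granule,
 * pg_shift = 12, bits_per_level = 9, va_bits = 36, so levels = 4,
 * start_level = 0 and pgd_bits = 9 (a full 512-entry, 4K PGD). For
 * ias = 39 the same sums give levels = 3, so the walk starts at level 1;
 * the top-level table simply absorbs whatever va_bits are left over after
 * the lower levels take bits_per_level bits each.
 */
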
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

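/*
 * Stage-2 PGD concatenation example (illustrative): with a 4K granule and
 * ias = 40, the generic geometry gives a 4-level walk whose level-0 table
 * holds only 2 entries. Since 2 <= ARM_LPAE_S2_MAX_CONCAT_PAGES, the
 * function below instead concatenates two full level-1 tables into one
 * 8K PGD and starts the walk at level 1, saving a level of lookup on every
 * stage-2 translation.
 */
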
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size, mapped;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_READ | IOMMU_WRITE |
					   IOMMU_NOEXEC | IOMMU_CACHE,
					   GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map_pages(ops, iova, iova + size, size, 1,
					    IOMMU_READ | IOMMU_NOEXEC,
					    GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
				   IOMMU_READ, GFP_KERNEL, &mapped))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_WRITE, GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct device dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
		.iommu_dev = &dev,
	};

	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
	set_dev_node(&dev, NUMA_NO_NODE);

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif