// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))

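/*
 * A worked example of the index arithmetic above, assuming a 4K granule
 * (so bits_per_level = 12 - ilog2(sizeof(arm_lpae_iopte)) = 12 - 3 = 9):
 *
 *	ARM_LPAE_LVL_SHIFT(3,d) = (4 - 3) * 9 + 3 = 12  ->  4K pages
 *	ARM_LPAE_LVL_SHIFT(2,d) = (4 - 2) * 9 + 3 = 21  ->  2M blocks
 *	ARM_LPAE_LVL_SHIFT(1,d) = (4 - 1) * 9 + 3 = 30  ->  1G blocks
 *
 * so ARM_LPAE_LVL_IDX() extracts consecutive 9-bit fields of the IOVA and
 * ARM_LPAE_BLOCK_SIZE() yields the usual VMSAv8-64 long-descriptor sizes.
 */
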
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

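/*
 * An illustrative example of the 52-bit address packing above (only
 * reachable with a 64K granule, where PA bits 51:48 live in PTE bits
 * 15:12): for paddr == 0x000f_0000_0000_0000, paddr >> 36 deposits 0xf
 * at bits 15:12, so the PTE address field becomes 0xf000.
 * iopte_to_paddr() reverses this with the << 36 rotation, the final
 * (ARM_LPAE_PTE_ADDR_MASK << 4) covering PA bits 51:16.
 */
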
static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
			WARN_ON(1);
			return -EINVAL;
		}
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

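/*
 * A sketch of the ARM_LPAE_PTE_SW_SYNC handshake above: on a non-coherent
 * walk, two CPUs may race to install a table into the same slot. The loser
 * of the cmpxchg() sees a valid 'old' entry but cannot tell whether the
 * winner has finished cleaning it to the point of coherency, so both sides
 * sync the PTE themselves; the winner then rewrites it with SW_SYNC set,
 * letting later walkers (see __arm_lpae_map()) skip the redundant sync
 * once the bit is observed.
 */
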
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size)
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}

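/*
 * For example, mapping a 2M region at a 2M-aligned IOVA with a 4K granule
 * recurses through __arm_lpae_map() until ARM_LPAE_BLOCK_SIZE() matches at
 * level 2, where a single block PTE is installed; a 4K mapping descends one
 * level further and lands in a level-3 page entry instead, allocating any
 * missing intermediate tables along the way.
 */
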
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	if (prot & IOMMU_CACHE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}

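/*
 * A concrete example of the translation above (assuming ARM_64_LPAE_S1
 * with no quirks): prot == (IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE)
 * yields nG | AP_UNPRIV | (MAIR_ATTR_IDX_CACHE << ATTRINDX_SHIFT) |
 * SH_IS | AF, i.e. a non-global, read/write, unprivileged-accessible,
 * inner-shareable, write-back cacheable entry with the access flag
 * preset so the walker never raises AF faults.
 */
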
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

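/*
 * The iaext check above is a sign-extension test. With ias == 48, a
 * TTBR0-style IOVA must have bits 63:48 clear, so the arithmetic shift
 * (s64)iova >> 48 must yield 0; under IO_PGTABLE_QUIRK_ARM_TTBR1 the
 * upper bits must instead all be set, the shift yields -1 and ~iaext is
 * 0. Anything else is outside the configured input address range.
 */
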
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
		return size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
}

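/*
 * A worked example of the block split above: unmapping one 4K page from
 * an existing 2M block at 4K granule enters arm_lpae_split_blk_unmap()
 * with lvl == 3 and split_sz == SZ_4K. A new level-3 table is populated
 * with 511 page entries inheriting the block's attributes, the entry
 * covering the target IOVA is skipped, and the table is swapped in over
 * the old block PTE via arm_lpae_install_table().
 */
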
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl, iop->fmt)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_flush_walk(iop, iova, size,
						  ARM_LPAE_GRANULE(data));
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_page(iop, gather, iova, size);
		}

		return size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

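/*
 * Putting numbers on the calculation above: a 4K granule with a 39-bit
 * IAS gives pg_shift = 12, bits_per_level = 9, va_bits = 27 and
 * levels = DIV_ROUND_UP(27, 9) = 3, so start_level = 1 and
 * pgd_bits = 27 - 18 = 9: a single 4K table of 512 level-1 entries.
 */
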
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NON_STRICT |
			    IO_PGTABLE_QUIRK_ARM_TTBR1))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		tcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

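/*
 * With the attribute values defined at the top of the file, the MAIR
 * computed above works out to the constant 0xf404ff44: index 0 holds
 * Normal Non-Cacheable (0x44), index 1 Normal Write-Back Read/Write-
 * Allocate (0xff), index 2 Device-nGnRE (0x04) and index 3 the Inner
 * Non-Cacheable, Outer Write-Back variant (0xf4).
 */
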
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

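/*
 * An example of the concatenation above: a 4K granule with a 40-bit IAS
 * initially yields levels = 4, start_level = 0 and a two-entry level-0
 * table. Since 2 <= ARM_LPAE_S2_MAX_CONCAT_PAGES, pgd_bits grows to 10
 * and the walk instead starts with two concatenated 4K tables at level 1.
 * SL0 then encodes as ~(1 + 1) & 0x3 == 1, the "start at level 1" value
 * for the 4K granule.
 */
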
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

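/*
 * These init_fns are consumed via alloc_io_pgtable_ops(), as the selftest
 * below demonstrates. A minimal caller looks roughly like the following
 * (illustrative only, not compiled; my_tlb_ops, dev and cookie are
 * driver-supplied placeholders):
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.coherent_walk	= true,
 *		.tlb		= &my_tlb_ops,
 *		.iommu_dev	= dev,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops)
 *		ops->map(ops, iova, paddr, SZ_4K,
 *			 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 */
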
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_flush_leaf	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE, GFP_KERNEL))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif