1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * CPU-agnostic ARM page table allocator. 4 * 5 * Copyright (C) 2014 ARM Limited 6 * 7 * Author: Will Deacon <will.deacon@arm.com> 8 */ 9 10 #define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt 11 12 #include <linux/atomic.h> 13 #include <linux/bitops.h> 14 #include <linux/io-pgtable.h> 15 #include <linux/kernel.h> 16 #include <linux/sizes.h> 17 #include <linux/slab.h> 18 #include <linux/types.h> 19 #include <linux/dma-mapping.h> 20 21 #include <asm/barrier.h> 22 23 #define ARM_LPAE_MAX_ADDR_BITS 52 24 #define ARM_LPAE_S2_MAX_CONCAT_PAGES 16 25 #define ARM_LPAE_MAX_LEVELS 4 26 27 /* Struct accessors */ 28 #define io_pgtable_to_data(x) \ 29 container_of((x), struct arm_lpae_io_pgtable, iop) 30 31 #define io_pgtable_ops_to_data(x) \ 32 io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) 33 34 /* 35 * Calculate the right shift amount to get to the portion describing level l 36 * in a virtual address mapped by the pagetable in d. 37 */ 38 #define ARM_LPAE_LVL_SHIFT(l,d) \ 39 (((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) + \ 40 ilog2(sizeof(arm_lpae_iopte))) 41 42 #define ARM_LPAE_GRANULE(d) \ 43 (sizeof(arm_lpae_iopte) << (d)->bits_per_level) 44 #define ARM_LPAE_PGD_SIZE(d) \ 45 (sizeof(arm_lpae_iopte) << (d)->pgd_bits) 46 47 /* 48 * Calculate the index at level l used to map virtual address a using the 49 * pagetable in d. 50 */ 51 #define ARM_LPAE_PGD_IDX(l,d) \ 52 ((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0) 53 54 #define ARM_LPAE_LVL_IDX(a,l,d) \ 55 (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ 56 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) 57 58 /* Calculate the block/page mapping size at level l for pagetable in d. */ 59 #define ARM_LPAE_BLOCK_SIZE(l,d) (1ULL << ARM_LPAE_LVL_SHIFT(l,d)) 60 61 /* Page table bits */ 62 #define ARM_LPAE_PTE_TYPE_SHIFT 0 63 #define ARM_LPAE_PTE_TYPE_MASK 0x3 64 65 #define ARM_LPAE_PTE_TYPE_BLOCK 1 66 #define ARM_LPAE_PTE_TYPE_TABLE 3 67 #define ARM_LPAE_PTE_TYPE_PAGE 3 68 69 #define ARM_LPAE_PTE_ADDR_MASK GENMASK_ULL(47,12) 70 71 #define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63) 72 #define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53) 73 #define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10) 74 #define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8) 75 #define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8) 76 #define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8) 77 #define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5) 78 #define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0) 79 80 #define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2) 81 /* Ignore the contiguous bit for block splitting */ 82 #define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52) 83 #define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \ 84 ARM_LPAE_PTE_ATTR_HI_MASK) 85 /* Software bit for solving coherency races */ 86 #define ARM_LPAE_PTE_SW_SYNC (((arm_lpae_iopte)1) << 55) 87 88 /* Stage-1 PTE */ 89 #define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) 90 #define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6) 91 #define ARM_LPAE_PTE_ATTRINDX_SHIFT 2 92 #define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11) 93 94 /* Stage-2 PTE */ 95 #define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6) 96 #define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6) 97 #define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6) 98 #define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2) 99 #define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2) 100 #define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2) 101 102 /* Register bits */ 103 #define ARM_LPAE_TCR_TG0_4K 0 104 #define ARM_LPAE_TCR_TG0_64K 1 105 #define ARM_LPAE_TCR_TG0_16K 2 106 107 #define ARM_LPAE_TCR_TG1_16K 1 108 #define ARM_LPAE_TCR_TG1_4K 2 109 #define ARM_LPAE_TCR_TG1_64K 3 110 111 #define ARM_LPAE_TCR_SH_NS 0 112 #define ARM_LPAE_TCR_SH_OS 2 113 #define ARM_LPAE_TCR_SH_IS 3 114 115 #define ARM_LPAE_TCR_RGN_NC 0 116 #define ARM_LPAE_TCR_RGN_WBWA 1 117 #define ARM_LPAE_TCR_RGN_WT 2 118 #define ARM_LPAE_TCR_RGN_WB 3 119 120 #define ARM_LPAE_VTCR_SL0_MASK 0x3 121 122 #define ARM_LPAE_TCR_T0SZ_SHIFT 0 123 124 #define ARM_LPAE_VTCR_PS_SHIFT 16 125 #define ARM_LPAE_VTCR_PS_MASK 0x7 126 127 #define ARM_LPAE_TCR_PS_32_BIT 0x0ULL 128 #define ARM_LPAE_TCR_PS_36_BIT 0x1ULL 129 #define ARM_LPAE_TCR_PS_40_BIT 0x2ULL 130 #define ARM_LPAE_TCR_PS_42_BIT 0x3ULL 131 #define ARM_LPAE_TCR_PS_44_BIT 0x4ULL 132 #define ARM_LPAE_TCR_PS_48_BIT 0x5ULL 133 #define ARM_LPAE_TCR_PS_52_BIT 0x6ULL 134 135 #define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) 136 #define ARM_LPAE_MAIR_ATTR_MASK 0xff 137 #define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 138 #define ARM_LPAE_MAIR_ATTR_NC 0x44 139 #define ARM_LPAE_MAIR_ATTR_INC_OWBRWA 0xf4 140 #define ARM_LPAE_MAIR_ATTR_WBRWA 0xff 141 #define ARM_LPAE_MAIR_ATTR_IDX_NC 0 142 #define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 143 #define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 144 #define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE 3 145 146 #define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0) 147 #define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2) 148 #define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4) 149 150 #define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL 151 #define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL 152 153 /* IOPTE accessors */ 154 #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d)) 155 156 #define iopte_type(pte,l) \ 157 (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) 158 159 #define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK) 160 161 struct arm_lpae_io_pgtable { 162 struct io_pgtable iop; 163 164 int pgd_bits; 165 int start_level; 166 int bits_per_level; 167 168 void *pgd; 169 }; 170 171 typedef u64 arm_lpae_iopte; 172 173 static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl, 174 enum io_pgtable_fmt fmt) 175 { 176 if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE) 177 return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE; 178 179 return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK; 180 } 181 182 static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr, 183 struct arm_lpae_io_pgtable *data) 184 { 185 arm_lpae_iopte pte = paddr; 186 187 /* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */ 188 return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK; 189 } 190 191 static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte, 192 struct arm_lpae_io_pgtable *data) 193 { 194 u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK; 195 196 if (ARM_LPAE_GRANULE(data) < SZ_64K) 197 return paddr; 198 199 /* Rotate the packed high-order bits back to the top */ 200 return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4); 201 } 202 203 static bool selftest_running = false; 204 205 static dma_addr_t __arm_lpae_dma_addr(void *pages) 206 { 207 return (dma_addr_t)virt_to_phys(pages); 208 } 209 210 static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, 211 struct io_pgtable_cfg *cfg) 212 { 213 struct device *dev = cfg->iommu_dev; 214 int order = get_order(size); 215 struct page *p; 216 dma_addr_t dma; 217 void *pages; 218 219 VM_BUG_ON((gfp & __GFP_HIGHMEM)); 220 p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE, 221 gfp | __GFP_ZERO, order); 222 if (!p) 223 return NULL; 224 225 pages = page_address(p); 226 if (!cfg->coherent_walk) { 227 dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE); 228 if (dma_mapping_error(dev, dma)) 229 goto out_free; 230 /* 231 * We depend on the IOMMU being able to work with any physical 232 * address directly, so if the DMA layer suggests otherwise by 233 * translating or truncating them, that bodes very badly... 234 */ 235 if (dma != virt_to_phys(pages)) 236 goto out_unmap; 237 } 238 239 return pages; 240 241 out_unmap: 242 dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n"); 243 dma_unmap_single(dev, dma, size, DMA_TO_DEVICE); 244 out_free: 245 __free_pages(p, order); 246 return NULL; 247 } 248 249 static void __arm_lpae_free_pages(void *pages, size_t size, 250 struct io_pgtable_cfg *cfg) 251 { 252 if (!cfg->coherent_walk) 253 dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), 254 size, DMA_TO_DEVICE); 255 free_pages((unsigned long)pages, get_order(size)); 256 } 257 258 static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, 259 struct io_pgtable_cfg *cfg) 260 { 261 dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep), 262 sizeof(*ptep), DMA_TO_DEVICE); 263 } 264 265 static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, 266 struct io_pgtable_cfg *cfg) 267 { 268 *ptep = pte; 269 270 if (!cfg->coherent_walk) 271 __arm_lpae_sync_pte(ptep, cfg); 272 } 273 274 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, 275 struct iommu_iotlb_gather *gather, 276 unsigned long iova, size_t size, int lvl, 277 arm_lpae_iopte *ptep); 278 279 static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, 280 phys_addr_t paddr, arm_lpae_iopte prot, 281 int lvl, arm_lpae_iopte *ptep) 282 { 283 arm_lpae_iopte pte = prot; 284 285 if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1) 286 pte |= ARM_LPAE_PTE_TYPE_PAGE; 287 else 288 pte |= ARM_LPAE_PTE_TYPE_BLOCK; 289 290 pte |= paddr_to_iopte(paddr, data); 291 292 __arm_lpae_set_pte(ptep, pte, &data->iop.cfg); 293 } 294 295 static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, 296 unsigned long iova, phys_addr_t paddr, 297 arm_lpae_iopte prot, int lvl, 298 arm_lpae_iopte *ptep) 299 { 300 arm_lpae_iopte pte = *ptep; 301 302 if (iopte_leaf(pte, lvl, data->iop.fmt)) { 303 /* We require an unmap first */ 304 WARN_ON(!selftest_running); 305 return -EEXIST; 306 } else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { 307 /* 308 * We need to unmap and free the old table before 309 * overwriting it with a block entry. 310 */ 311 arm_lpae_iopte *tblp; 312 size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); 313 314 tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); 315 if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) { 316 WARN_ON(1); 317 return -EINVAL; 318 } 319 } 320 321 __arm_lpae_init_pte(data, paddr, prot, lvl, ptep); 322 return 0; 323 } 324 325 static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, 326 arm_lpae_iopte *ptep, 327 arm_lpae_iopte curr, 328 struct io_pgtable_cfg *cfg) 329 { 330 arm_lpae_iopte old, new; 331 332 new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; 333 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) 334 new |= ARM_LPAE_PTE_NSTABLE; 335 336 /* 337 * Ensure the table itself is visible before its PTE can be. 338 * Whilst we could get away with cmpxchg64_release below, this 339 * doesn't have any ordering semantics when !CONFIG_SMP. 340 */ 341 dma_wmb(); 342 343 old = cmpxchg64_relaxed(ptep, curr, new); 344 345 if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC)) 346 return old; 347 348 /* Even if it's not ours, there's no point waiting; just kick it */ 349 __arm_lpae_sync_pte(ptep, cfg); 350 if (old == curr) 351 WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC); 352 353 return old; 354 } 355 356 static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, 357 phys_addr_t paddr, size_t size, arm_lpae_iopte prot, 358 int lvl, arm_lpae_iopte *ptep) 359 { 360 arm_lpae_iopte *cptep, pte; 361 size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); 362 size_t tblsz = ARM_LPAE_GRANULE(data); 363 struct io_pgtable_cfg *cfg = &data->iop.cfg; 364 365 /* Find our entry at the current level */ 366 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); 367 368 /* If we can install a leaf entry at this level, then do so */ 369 if (size == block_size) 370 return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); 371 372 /* We can't allocate tables at the final level */ 373 if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1)) 374 return -EINVAL; 375 376 /* Grab a pointer to the next level */ 377 pte = READ_ONCE(*ptep); 378 if (!pte) { 379 cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg); 380 if (!cptep) 381 return -ENOMEM; 382 383 pte = arm_lpae_install_table(cptep, ptep, 0, cfg); 384 if (pte) 385 __arm_lpae_free_pages(cptep, tblsz, cfg); 386 } else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) { 387 __arm_lpae_sync_pte(ptep, cfg); 388 } 389 390 if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) { 391 cptep = iopte_deref(pte, data); 392 } else if (pte) { 393 /* We require an unmap first */ 394 WARN_ON(!selftest_running); 395 return -EEXIST; 396 } 397 398 /* Rinse, repeat */ 399 return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); 400 } 401 402 static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, 403 int prot) 404 { 405 arm_lpae_iopte pte; 406 407 if (data->iop.fmt == ARM_64_LPAE_S1 || 408 data->iop.fmt == ARM_32_LPAE_S1) { 409 pte = ARM_LPAE_PTE_nG; 410 if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) 411 pte |= ARM_LPAE_PTE_AP_RDONLY; 412 if (!(prot & IOMMU_PRIV)) 413 pte |= ARM_LPAE_PTE_AP_UNPRIV; 414 } else { 415 pte = ARM_LPAE_PTE_HAP_FAULT; 416 if (prot & IOMMU_READ) 417 pte |= ARM_LPAE_PTE_HAP_READ; 418 if (prot & IOMMU_WRITE) 419 pte |= ARM_LPAE_PTE_HAP_WRITE; 420 } 421 422 /* 423 * Note that this logic is structured to accommodate Mali LPAE 424 * having stage-1-like attributes but stage-2-like permissions. 425 */ 426 if (data->iop.fmt == ARM_64_LPAE_S2 || 427 data->iop.fmt == ARM_32_LPAE_S2) { 428 if (prot & IOMMU_MMIO) 429 pte |= ARM_LPAE_PTE_MEMATTR_DEV; 430 else if (prot & IOMMU_CACHE) 431 pte |= ARM_LPAE_PTE_MEMATTR_OIWB; 432 else 433 pte |= ARM_LPAE_PTE_MEMATTR_NC; 434 } else { 435 if (prot & IOMMU_MMIO) 436 pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV 437 << ARM_LPAE_PTE_ATTRINDX_SHIFT); 438 else if (prot & IOMMU_CACHE) 439 pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE 440 << ARM_LPAE_PTE_ATTRINDX_SHIFT); 441 else if (prot & IOMMU_SYS_CACHE_ONLY) 442 pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE 443 << ARM_LPAE_PTE_ATTRINDX_SHIFT); 444 } 445 446 if (prot & IOMMU_CACHE) 447 pte |= ARM_LPAE_PTE_SH_IS; 448 else 449 pte |= ARM_LPAE_PTE_SH_OS; 450 451 if (prot & IOMMU_NOEXEC) 452 pte |= ARM_LPAE_PTE_XN; 453 454 if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) 455 pte |= ARM_LPAE_PTE_NS; 456 457 if (data->iop.fmt != ARM_MALI_LPAE) 458 pte |= ARM_LPAE_PTE_AF; 459 460 return pte; 461 } 462 463 static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, 464 phys_addr_t paddr, size_t size, int iommu_prot) 465 { 466 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); 467 struct io_pgtable_cfg *cfg = &data->iop.cfg; 468 arm_lpae_iopte *ptep = data->pgd; 469 int ret, lvl = data->start_level; 470 arm_lpae_iopte prot; 471 long iaext = (s64)iova >> cfg->ias; 472 473 /* If no access, then nothing to do */ 474 if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) 475 return 0; 476 477 if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) 478 return -EINVAL; 479 480 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) 481 iaext = ~iaext; 482 if (WARN_ON(iaext || paddr >> cfg->oas)) 483 return -ERANGE; 484 485 prot = arm_lpae_prot_to_pte(data, iommu_prot); 486 ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); 487 /* 488 * Synchronise all PTE updates for the new mapping before there's 489 * a chance for anything to kick off a table walk for the new iova. 490 */ 491 wmb(); 492 493 return ret; 494 } 495 496 static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, 497 arm_lpae_iopte *ptep) 498 { 499 arm_lpae_iopte *start, *end; 500 unsigned long table_size; 501 502 if (lvl == data->start_level) 503 table_size = ARM_LPAE_PGD_SIZE(data); 504 else 505 table_size = ARM_LPAE_GRANULE(data); 506 507 start = ptep; 508 509 /* Only leaf entries at the last level */ 510 if (lvl == ARM_LPAE_MAX_LEVELS - 1) 511 end = ptep; 512 else 513 end = (void *)ptep + table_size; 514 515 while (ptep != end) { 516 arm_lpae_iopte pte = *ptep++; 517 518 if (!pte || iopte_leaf(pte, lvl, data->iop.fmt)) 519 continue; 520 521 __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); 522 } 523 524 __arm_lpae_free_pages(start, table_size, &data->iop.cfg); 525 } 526 527 static void arm_lpae_free_pgtable(struct io_pgtable *iop) 528 { 529 struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); 530 531 __arm_lpae_free_pgtable(data, data->start_level, data->pgd); 532 kfree(data); 533 } 534 535 static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, 536 struct iommu_iotlb_gather *gather, 537 unsigned long iova, size_t size, 538 arm_lpae_iopte blk_pte, int lvl, 539 arm_lpae_iopte *ptep) 540 { 541 struct io_pgtable_cfg *cfg = &data->iop.cfg; 542 arm_lpae_iopte pte, *tablep; 543 phys_addr_t blk_paddr; 544 size_t tablesz = ARM_LPAE_GRANULE(data); 545 size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data); 546 int i, unmap_idx = -1; 547 548 if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) 549 return 0; 550 551 tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg); 552 if (!tablep) 553 return 0; /* Bytes unmapped */ 554 555 if (size == split_sz) 556 unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data); 557 558 blk_paddr = iopte_to_paddr(blk_pte, data); 559 pte = iopte_prot(blk_pte); 560 561 for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) { 562 /* Unmap! */ 563 if (i == unmap_idx) 564 continue; 565 566 __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]); 567 } 568 569 pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg); 570 if (pte != blk_pte) { 571 __arm_lpae_free_pages(tablep, tablesz, cfg); 572 /* 573 * We may race against someone unmapping another part of this 574 * block, but anything else is invalid. We can't misinterpret 575 * a page entry here since we're never at the last level. 576 */ 577 if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE) 578 return 0; 579 580 tablep = iopte_deref(pte, data); 581 } else if (unmap_idx >= 0) { 582 io_pgtable_tlb_add_page(&data->iop, gather, iova, size); 583 return size; 584 } 585 586 return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep); 587 } 588 589 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, 590 struct iommu_iotlb_gather *gather, 591 unsigned long iova, size_t size, int lvl, 592 arm_lpae_iopte *ptep) 593 { 594 arm_lpae_iopte pte; 595 struct io_pgtable *iop = &data->iop; 596 597 /* Something went horribly wrong and we ran out of page table */ 598 if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) 599 return 0; 600 601 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); 602 pte = READ_ONCE(*ptep); 603 if (WARN_ON(!pte)) 604 return 0; 605 606 /* If the size matches this level, we're in the right place */ 607 if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) { 608 __arm_lpae_set_pte(ptep, 0, &iop->cfg); 609 610 if (!iopte_leaf(pte, lvl, iop->fmt)) { 611 /* Also flush any partial walks */ 612 io_pgtable_tlb_flush_walk(iop, iova, size, 613 ARM_LPAE_GRANULE(data)); 614 ptep = iopte_deref(pte, data); 615 __arm_lpae_free_pgtable(data, lvl + 1, ptep); 616 } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { 617 /* 618 * Order the PTE update against queueing the IOVA, to 619 * guarantee that a flush callback from a different CPU 620 * has observed it before the TLBIALL can be issued. 621 */ 622 smp_wmb(); 623 } else { 624 io_pgtable_tlb_add_page(iop, gather, iova, size); 625 } 626 627 return size; 628 } else if (iopte_leaf(pte, lvl, iop->fmt)) { 629 /* 630 * Insert a table at the next level to map the old region, 631 * minus the part we want to unmap 632 */ 633 return arm_lpae_split_blk_unmap(data, gather, iova, size, pte, 634 lvl + 1, ptep); 635 } 636 637 /* Keep on walkin' */ 638 ptep = iopte_deref(pte, data); 639 return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep); 640 } 641 642 static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, 643 size_t size, struct iommu_iotlb_gather *gather) 644 { 645 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); 646 struct io_pgtable_cfg *cfg = &data->iop.cfg; 647 arm_lpae_iopte *ptep = data->pgd; 648 long iaext = (s64)iova >> cfg->ias; 649 650 if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) 651 return 0; 652 653 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) 654 iaext = ~iaext; 655 if (WARN_ON(iaext)) 656 return 0; 657 658 return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep); 659 } 660 661 static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, 662 unsigned long iova) 663 { 664 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); 665 arm_lpae_iopte pte, *ptep = data->pgd; 666 int lvl = data->start_level; 667 668 do { 669 /* Valid IOPTE pointer? */ 670 if (!ptep) 671 return 0; 672 673 /* Grab the IOPTE we're interested in */ 674 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); 675 pte = READ_ONCE(*ptep); 676 677 /* Valid entry? */ 678 if (!pte) 679 return 0; 680 681 /* Leaf entry? */ 682 if (iopte_leaf(pte, lvl, data->iop.fmt)) 683 goto found_translation; 684 685 /* Take it to the next level */ 686 ptep = iopte_deref(pte, data); 687 } while (++lvl < ARM_LPAE_MAX_LEVELS); 688 689 /* Ran out of page tables to walk */ 690 return 0; 691 692 found_translation: 693 iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1); 694 return iopte_to_paddr(pte, data) | iova; 695 } 696 697 static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) 698 { 699 unsigned long granule, page_sizes; 700 unsigned int max_addr_bits = 48; 701 702 /* 703 * We need to restrict the supported page sizes to match the 704 * translation regime for a particular granule. Aim to match 705 * the CPU page size if possible, otherwise prefer smaller sizes. 706 * While we're at it, restrict the block sizes to match the 707 * chosen granule. 708 */ 709 if (cfg->pgsize_bitmap & PAGE_SIZE) 710 granule = PAGE_SIZE; 711 else if (cfg->pgsize_bitmap & ~PAGE_MASK) 712 granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); 713 else if (cfg->pgsize_bitmap & PAGE_MASK) 714 granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); 715 else 716 granule = 0; 717 718 switch (granule) { 719 case SZ_4K: 720 page_sizes = (SZ_4K | SZ_2M | SZ_1G); 721 break; 722 case SZ_16K: 723 page_sizes = (SZ_16K | SZ_32M); 724 break; 725 case SZ_64K: 726 max_addr_bits = 52; 727 page_sizes = (SZ_64K | SZ_512M); 728 if (cfg->oas > 48) 729 page_sizes |= 1ULL << 42; /* 4TB */ 730 break; 731 default: 732 page_sizes = 0; 733 } 734 735 cfg->pgsize_bitmap &= page_sizes; 736 cfg->ias = min(cfg->ias, max_addr_bits); 737 cfg->oas = min(cfg->oas, max_addr_bits); 738 } 739 740 static struct arm_lpae_io_pgtable * 741 arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) 742 { 743 struct arm_lpae_io_pgtable *data; 744 int levels, va_bits, pg_shift; 745 746 arm_lpae_restrict_pgsizes(cfg); 747 748 if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K))) 749 return NULL; 750 751 if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS) 752 return NULL; 753 754 if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) 755 return NULL; 756 757 if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) { 758 dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n"); 759 return NULL; 760 } 761 762 data = kmalloc(sizeof(*data), GFP_KERNEL); 763 if (!data) 764 return NULL; 765 766 pg_shift = __ffs(cfg->pgsize_bitmap); 767 data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte)); 768 769 va_bits = cfg->ias - pg_shift; 770 levels = DIV_ROUND_UP(va_bits, data->bits_per_level); 771 data->start_level = ARM_LPAE_MAX_LEVELS - levels; 772 773 /* Calculate the actual size of our pgd (without concatenation) */ 774 data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1)); 775 776 data->iop.ops = (struct io_pgtable_ops) { 777 .map = arm_lpae_map, 778 .unmap = arm_lpae_unmap, 779 .iova_to_phys = arm_lpae_iova_to_phys, 780 }; 781 782 return data; 783 } 784 785 static struct io_pgtable * 786 arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) 787 { 788 u64 reg; 789 struct arm_lpae_io_pgtable *data; 790 typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr; 791 bool tg1; 792 793 if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | 794 IO_PGTABLE_QUIRK_NON_STRICT | 795 IO_PGTABLE_QUIRK_ARM_TTBR1)) 796 return NULL; 797 798 data = arm_lpae_alloc_pgtable(cfg); 799 if (!data) 800 return NULL; 801 802 /* TCR */ 803 if (cfg->coherent_walk) { 804 tcr->sh = ARM_LPAE_TCR_SH_IS; 805 tcr->irgn = ARM_LPAE_TCR_RGN_WBWA; 806 tcr->orgn = ARM_LPAE_TCR_RGN_WBWA; 807 } else { 808 tcr->sh = ARM_LPAE_TCR_SH_OS; 809 tcr->irgn = ARM_LPAE_TCR_RGN_NC; 810 tcr->orgn = ARM_LPAE_TCR_RGN_NC; 811 } 812 813 tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1; 814 switch (ARM_LPAE_GRANULE(data)) { 815 case SZ_4K: 816 tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K; 817 break; 818 case SZ_16K: 819 tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K; 820 break; 821 case SZ_64K: 822 tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K; 823 break; 824 } 825 826 switch (cfg->oas) { 827 case 32: 828 tcr->ips = ARM_LPAE_TCR_PS_32_BIT; 829 break; 830 case 36: 831 tcr->ips = ARM_LPAE_TCR_PS_36_BIT; 832 break; 833 case 40: 834 tcr->ips = ARM_LPAE_TCR_PS_40_BIT; 835 break; 836 case 42: 837 tcr->ips = ARM_LPAE_TCR_PS_42_BIT; 838 break; 839 case 44: 840 tcr->ips = ARM_LPAE_TCR_PS_44_BIT; 841 break; 842 case 48: 843 tcr->ips = ARM_LPAE_TCR_PS_48_BIT; 844 break; 845 case 52: 846 tcr->ips = ARM_LPAE_TCR_PS_52_BIT; 847 break; 848 default: 849 goto out_free_data; 850 } 851 852 tcr->tsz = 64ULL - cfg->ias; 853 854 /* MAIRs */ 855 reg = (ARM_LPAE_MAIR_ATTR_NC 856 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | 857 (ARM_LPAE_MAIR_ATTR_WBRWA 858 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | 859 (ARM_LPAE_MAIR_ATTR_DEVICE 860 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) | 861 (ARM_LPAE_MAIR_ATTR_INC_OWBRWA 862 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE)); 863 864 cfg->arm_lpae_s1_cfg.mair = reg; 865 866 /* Looking good; allocate a pgd */ 867 data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), 868 GFP_KERNEL, cfg); 869 if (!data->pgd) 870 goto out_free_data; 871 872 /* Ensure the empty pgd is visible before any actual TTBR write */ 873 wmb(); 874 875 /* TTBR */ 876 cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd); 877 return &data->iop; 878 879 out_free_data: 880 kfree(data); 881 return NULL; 882 } 883 884 static struct io_pgtable * 885 arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) 886 { 887 u64 sl; 888 struct arm_lpae_io_pgtable *data; 889 typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr; 890 891 /* The NS quirk doesn't apply at stage 2 */ 892 if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT)) 893 return NULL; 894 895 data = arm_lpae_alloc_pgtable(cfg); 896 if (!data) 897 return NULL; 898 899 /* 900 * Concatenate PGDs at level 1 if possible in order to reduce 901 * the depth of the stage-2 walk. 902 */ 903 if (data->start_level == 0) { 904 unsigned long pgd_pages; 905 906 pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte); 907 if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) { 908 data->pgd_bits += data->bits_per_level; 909 data->start_level++; 910 } 911 } 912 913 /* VTCR */ 914 if (cfg->coherent_walk) { 915 vtcr->sh = ARM_LPAE_TCR_SH_IS; 916 vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA; 917 vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA; 918 } else { 919 vtcr->sh = ARM_LPAE_TCR_SH_OS; 920 vtcr->irgn = ARM_LPAE_TCR_RGN_NC; 921 vtcr->orgn = ARM_LPAE_TCR_RGN_NC; 922 } 923 924 sl = data->start_level; 925 926 switch (ARM_LPAE_GRANULE(data)) { 927 case SZ_4K: 928 vtcr->tg = ARM_LPAE_TCR_TG0_4K; 929 sl++; /* SL0 format is different for 4K granule size */ 930 break; 931 case SZ_16K: 932 vtcr->tg = ARM_LPAE_TCR_TG0_16K; 933 break; 934 case SZ_64K: 935 vtcr->tg = ARM_LPAE_TCR_TG0_64K; 936 break; 937 } 938 939 switch (cfg->oas) { 940 case 32: 941 vtcr->ps = ARM_LPAE_TCR_PS_32_BIT; 942 break; 943 case 36: 944 vtcr->ps = ARM_LPAE_TCR_PS_36_BIT; 945 break; 946 case 40: 947 vtcr->ps = ARM_LPAE_TCR_PS_40_BIT; 948 break; 949 case 42: 950 vtcr->ps = ARM_LPAE_TCR_PS_42_BIT; 951 break; 952 case 44: 953 vtcr->ps = ARM_LPAE_TCR_PS_44_BIT; 954 break; 955 case 48: 956 vtcr->ps = ARM_LPAE_TCR_PS_48_BIT; 957 break; 958 case 52: 959 vtcr->ps = ARM_LPAE_TCR_PS_52_BIT; 960 break; 961 default: 962 goto out_free_data; 963 } 964 965 vtcr->tsz = 64ULL - cfg->ias; 966 vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK; 967 968 /* Allocate pgd pages */ 969 data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), 970 GFP_KERNEL, cfg); 971 if (!data->pgd) 972 goto out_free_data; 973 974 /* Ensure the empty pgd is visible before any actual TTBR write */ 975 wmb(); 976 977 /* VTTBR */ 978 cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd); 979 return &data->iop; 980 981 out_free_data: 982 kfree(data); 983 return NULL; 984 } 985 986 static struct io_pgtable * 987 arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) 988 { 989 if (cfg->ias > 32 || cfg->oas > 40) 990 return NULL; 991 992 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); 993 return arm_64_lpae_alloc_pgtable_s1(cfg, cookie); 994 } 995 996 static struct io_pgtable * 997 arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) 998 { 999 if (cfg->ias > 40 || cfg->oas > 40) 1000 return NULL; 1001 1002 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); 1003 return arm_64_lpae_alloc_pgtable_s2(cfg, cookie); 1004 } 1005 1006 static struct io_pgtable * 1007 arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) 1008 { 1009 struct arm_lpae_io_pgtable *data; 1010 1011 /* No quirks for Mali (hopefully) */ 1012 if (cfg->quirks) 1013 return NULL; 1014 1015 if (cfg->ias > 48 || cfg->oas > 40) 1016 return NULL; 1017 1018 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); 1019 1020 data = arm_lpae_alloc_pgtable(cfg); 1021 if (!data) 1022 return NULL; 1023 1024 /* Mali seems to need a full 4-level table regardless of IAS */ 1025 if (data->start_level > 0) { 1026 data->start_level = 0; 1027 data->pgd_bits = 0; 1028 } 1029 /* 1030 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the 1031 * best we can do is mimic the out-of-tree driver and hope that the 1032 * "implementation-defined caching policy" is good enough. Similarly, 1033 * we'll use it for the sake of a valid attribute for our 'device' 1034 * index, although callers should never request that in practice. 1035 */ 1036 cfg->arm_mali_lpae_cfg.memattr = 1037 (ARM_MALI_LPAE_MEMATTR_IMP_DEF 1038 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | 1039 (ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 1040 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | 1041 (ARM_MALI_LPAE_MEMATTR_IMP_DEF 1042 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); 1043 1044 data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL, 1045 cfg); 1046 if (!data->pgd) 1047 goto out_free_data; 1048 1049 /* Ensure the empty pgd is visible before TRANSTAB can be written */ 1050 wmb(); 1051 1052 cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) | 1053 ARM_MALI_LPAE_TTBR_READ_INNER | 1054 ARM_MALI_LPAE_TTBR_ADRMODE_TABLE; 1055 return &data->iop; 1056 1057 out_free_data: 1058 kfree(data); 1059 return NULL; 1060 } 1061 1062 struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { 1063 .alloc = arm_64_lpae_alloc_pgtable_s1, 1064 .free = arm_lpae_free_pgtable, 1065 }; 1066 1067 struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = { 1068 .alloc = arm_64_lpae_alloc_pgtable_s2, 1069 .free = arm_lpae_free_pgtable, 1070 }; 1071 1072 struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = { 1073 .alloc = arm_32_lpae_alloc_pgtable_s1, 1074 .free = arm_lpae_free_pgtable, 1075 }; 1076 1077 struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = { 1078 .alloc = arm_32_lpae_alloc_pgtable_s2, 1079 .free = arm_lpae_free_pgtable, 1080 }; 1081 1082 struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = { 1083 .alloc = arm_mali_lpae_alloc_pgtable, 1084 .free = arm_lpae_free_pgtable, 1085 }; 1086 1087 #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST 1088 1089 static struct io_pgtable_cfg *cfg_cookie __initdata; 1090 1091 static void __init dummy_tlb_flush_all(void *cookie) 1092 { 1093 WARN_ON(cookie != cfg_cookie); 1094 } 1095 1096 static void __init dummy_tlb_flush(unsigned long iova, size_t size, 1097 size_t granule, void *cookie) 1098 { 1099 WARN_ON(cookie != cfg_cookie); 1100 WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); 1101 } 1102 1103 static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather, 1104 unsigned long iova, size_t granule, 1105 void *cookie) 1106 { 1107 dummy_tlb_flush(iova, granule, granule, cookie); 1108 } 1109 1110 static const struct iommu_flush_ops dummy_tlb_ops __initconst = { 1111 .tlb_flush_all = dummy_tlb_flush_all, 1112 .tlb_flush_walk = dummy_tlb_flush, 1113 .tlb_flush_leaf = dummy_tlb_flush, 1114 .tlb_add_page = dummy_tlb_add_page, 1115 }; 1116 1117 static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) 1118 { 1119 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); 1120 struct io_pgtable_cfg *cfg = &data->iop.cfg; 1121 1122 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", 1123 cfg->pgsize_bitmap, cfg->ias); 1124 pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n", 1125 ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data), 1126 ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd); 1127 } 1128 1129 #define __FAIL(ops, i) ({ \ 1130 WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \ 1131 arm_lpae_dump_ops(ops); \ 1132 selftest_running = false; \ 1133 -EFAULT; \ 1134 }) 1135 1136 static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) 1137 { 1138 static const enum io_pgtable_fmt fmts[] __initconst = { 1139 ARM_64_LPAE_S1, 1140 ARM_64_LPAE_S2, 1141 }; 1142 1143 int i, j; 1144 unsigned long iova; 1145 size_t size; 1146 struct io_pgtable_ops *ops; 1147 1148 selftest_running = true; 1149 1150 for (i = 0; i < ARRAY_SIZE(fmts); ++i) { 1151 cfg_cookie = cfg; 1152 ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); 1153 if (!ops) { 1154 pr_err("selftest: failed to allocate io pgtable ops\n"); 1155 return -ENOMEM; 1156 } 1157 1158 /* 1159 * Initial sanity checks. 1160 * Empty page tables shouldn't provide any translations. 1161 */ 1162 if (ops->iova_to_phys(ops, 42)) 1163 return __FAIL(ops, i); 1164 1165 if (ops->iova_to_phys(ops, SZ_1G + 42)) 1166 return __FAIL(ops, i); 1167 1168 if (ops->iova_to_phys(ops, SZ_2G + 42)) 1169 return __FAIL(ops, i); 1170 1171 /* 1172 * Distinct mappings of different granule sizes. 1173 */ 1174 iova = 0; 1175 for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) { 1176 size = 1UL << j; 1177 1178 if (ops->map(ops, iova, iova, size, IOMMU_READ | 1179 IOMMU_WRITE | 1180 IOMMU_NOEXEC | 1181 IOMMU_CACHE)) 1182 return __FAIL(ops, i); 1183 1184 /* Overlapping mappings */ 1185 if (!ops->map(ops, iova, iova + size, size, 1186 IOMMU_READ | IOMMU_NOEXEC)) 1187 return __FAIL(ops, i); 1188 1189 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) 1190 return __FAIL(ops, i); 1191 1192 iova += SZ_1G; 1193 } 1194 1195 /* Partial unmap */ 1196 size = 1UL << __ffs(cfg->pgsize_bitmap); 1197 if (ops->unmap(ops, SZ_1G + size, size, NULL) != size) 1198 return __FAIL(ops, i); 1199 1200 /* Remap of partial unmap */ 1201 if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) 1202 return __FAIL(ops, i); 1203 1204 if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) 1205 return __FAIL(ops, i); 1206 1207 /* Full unmap */ 1208 iova = 0; 1209 for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) { 1210 size = 1UL << j; 1211 1212 if (ops->unmap(ops, iova, size, NULL) != size) 1213 return __FAIL(ops, i); 1214 1215 if (ops->iova_to_phys(ops, iova + 42)) 1216 return __FAIL(ops, i); 1217 1218 /* Remap full block */ 1219 if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) 1220 return __FAIL(ops, i); 1221 1222 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) 1223 return __FAIL(ops, i); 1224 1225 iova += SZ_1G; 1226 } 1227 1228 free_io_pgtable_ops(ops); 1229 } 1230 1231 selftest_running = false; 1232 return 0; 1233 } 1234 1235 static int __init arm_lpae_do_selftests(void) 1236 { 1237 static const unsigned long pgsize[] __initconst = { 1238 SZ_4K | SZ_2M | SZ_1G, 1239 SZ_16K | SZ_32M, 1240 SZ_64K | SZ_512M, 1241 }; 1242 1243 static const unsigned int ias[] __initconst = { 1244 32, 36, 40, 42, 44, 48, 1245 }; 1246 1247 int i, j, pass = 0, fail = 0; 1248 struct io_pgtable_cfg cfg = { 1249 .tlb = &dummy_tlb_ops, 1250 .oas = 48, 1251 .coherent_walk = true, 1252 }; 1253 1254 for (i = 0; i < ARRAY_SIZE(pgsize); ++i) { 1255 for (j = 0; j < ARRAY_SIZE(ias); ++j) { 1256 cfg.pgsize_bitmap = pgsize[i]; 1257 cfg.ias = ias[j]; 1258 pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n", 1259 pgsize[i], ias[j]); 1260 if (arm_lpae_run_tests(&cfg)) 1261 fail++; 1262 else 1263 pass++; 1264 } 1265 } 1266 1267 pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail); 1268 return fail ? -EFAULT : 0; 1269 } 1270 subsys_initcall(arm_lpae_do_selftests); 1271 #endif 1272