/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)	(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_GRANULE(d)	(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
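/*
 * Illustrative example (not in the original source): with a 4K granule
 * and a 48-bit IAS, pg_shift = 12 and bits_per_level = 9, so all four
 * levels are used and ARM_LPAE_LVL_SHIFT(l,d) yields 39, 30, 21 and 12
 * for levels 0-3, i.e. each level indexes a successive 9-bit slice of
 * the IOVA, which ARM_LPAE_LVL_IDX() then masks out.
 */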
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
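/*
 * Illustrative note (not in the original source): with the attribute
 * indices above, the stage-1 MAIR assembled in
 * arm_64_lpae_alloc_pgtable_s1() packs NC (0x44) into slot 0, WBRWA
 * (0xff) into slot 1 and Device (0x04) into slot 2, giving
 * mair[0] = 0x04ff44.
 */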
/* IOPTE accessors */
#define iopte_deref(pte,d)	__va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (data->pg_shift < 16)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
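/*
 * Worked example of the packing above (illustrative, not in the
 * original source): with a 64K granule, output address bits 51:48 are
 * folded down into PTE bits 15:12 by paddr_to_iopte(), which is safe
 * because bits 15:12 of a 64K-aligned address are RES0;
 * iopte_to_paddr() reverses the rotation whenever pg_shift >= 16.
 */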
static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}
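/*
 * Note (added for clarity, not in the original source): when page
 * tables are not in a DMA-coherent domain, a table PTE installed by
 * one CPU may be observed by another before the corresponding cache
 * maintenance has completed. ARM_LPAE_PTE_SW_SYNC marks entries whose
 * sync is known to be complete, letting concurrent mappers skip
 * redundant maintenance in arm_lpae_install_table() and
 * __arm_lpae_map() below.
 */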
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
	    (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
		   !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;

		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
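/*
 * Illustrative example (not in the original source): at stage 1,
 * IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE yields nG | AP_UNPRIV |
 * (ATTRINDX = IDX_CACHE); __arm_lpae_init_pte() then ORs in the AF,
 * SH_IS and type bits before the PTE is written out.
 */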
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}
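/*
 * Note (added for clarity, not in the original source): the splitter
 * below handles unmapping a region smaller than an existing block
 * mapping. For example, unmapping 4K out of a 2M block with a 4K
 * granule allocates a level-3 table, repopulates the 511 surviving
 * entries with the block's attributes, leaves the target slot empty,
 * and swaps the table in with arm_lpae_install_table().
 */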
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	}

	if (unmap_idx < 0)
		return __arm_lpae_unmap(data, iova, size, lvl, tablep);

	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
	return size;
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						 ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}
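/*
 * Illustrative example (not in the original source): for a 2M block
 * mapped at level 2 with a 4K granule, the walk above stops at the
 * level-2 leaf and returns the block's output address ORed with
 * iova & (SZ_2M - 1), i.e. the offset within the block.
 */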
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
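/*
 * Worked sizing example (illustrative, not in the original source):
 * for a 4K granule and a 39-bit IAS, pg_shift = 12, bits_per_level = 9
 * and va_bits = 27, so levels = DIV_ROUND_UP(27, 9) = 3 and the pgd
 * covers pgd_bits = 27 - 18 = 9 bits of index, i.e. pgd_size = 4K.
 */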
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
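/*
 * Illustrative example (not in the original source): for a 4K granule,
 * ias = 39 and oas = 40, the TCR above works out to SH0 = IS,
 * IRGN0 = ORGN0 = WBWA, TG0 = 4K, IPS = 40-bit, EPD1 set and
 * T0SZ = 64 - 39 = 25, i.e. tcr = 0x200803519.
 */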
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	      (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})
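/*
 * Note (added for clarity, not in the original source): the selftests
 * below exercise each format with map/unmap/iova_to_phys round-trips:
 * lookups in an empty table must fail, overlapping maps must be
 * rejected, and a partial unmap of a block mapping must leave the
 * remainder translating correctly.
 */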
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.quirks = IO_PGTABLE_QUIRK_NO_DMA,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif