// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/*
 * Calculate the block/page mapping size at level l for pagetable in d.
 */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
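/*
 * Worked example for the macros above (illustrative, assuming a 4K
 * granule): sizeof(arm_lpae_iopte) == 8, so bits_per_level == 12 - 3 == 9
 * and each table holds 512 entries. ARM_LPAE_LVL_SHIFT(3,d) is then
 * (4 - 3) * 9 + 3 == 12, i.e. level 3 resolves IOVA bits [20:12] via
 * ARM_LPAE_LVL_IDX() and ARM_LPAE_BLOCK_SIZE(3,d) is 4K; levels 2 and 1
 * shift by 21 and 30, giving the familiar 2M and 1G block sizes.
 */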

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

#define APPLE_DART_PTE_PROT_NO_WRITE	(1<<7)
#define APPLE_DART_PTE_PROT_NO_READ	(1<<8)

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
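
/*
 * Worked example for the two helpers above (illustrative): with a 64K
 * granule and 52-bit OAs, paddr bits 51:48 are folded down into pte
 * bits 15:12, which are otherwise RES0 since a 64K-aligned OA has bits
 * 15:12 clear. So paddr 0x000f000000010000 packs to address bits
 * 0x1f000 in the pte, and iopte_to_paddr() rotates the low nibble back
 * up to recover bits 51:48. For sub-64K granules both helpers reduce
 * to a simple mask with ARM_LPAE_PTE_ADDR_MASK.
 */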

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
	*ptep = 0;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, 1, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}
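
/*
 * Illustrative note: when __arm_lpae_init_pte() installs several leaf
 * entries in one go (e.g. num_entries == 3 at the 4K level), it writes
 * three adjacent PTEs covering paddr, paddr + SZ_4K and paddr + SZ_8K,
 * then pushes them all out with a single __arm_lpae_sync_pte() if the
 * walker is not cache-coherent.
 */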

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
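
/*
 * Illustrative race for arm_lpae_install_table() (an assumed scenario,
 * not an extra code path): two concurrent map calls can both find the
 * same empty PTE and each allocate a next-level table. The cmpxchg64
 * lets exactly one of them win; the loser sees a non-zero 'old', frees
 * its freshly allocated table and walks the winner's instead. On a
 * non-coherent walk the winner's PTE may not have reached memory yet,
 * so the loser kicks a sync itself, and the ARM_LPAE_PTE_SW_SYNC
 * software bit records that this has happened so later walkers can
 * skip it.
 */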

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret && mapped)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == APPLE_DART) {
		pte = 0;
		if (!(prot & IOMMU_WRITE))
			pte |= APPLE_DART_PTE_PROT_NO_WRITE;
		if (!(prot & IOMMU_READ))
			pte |= APPLE_DART_PTE_PROT_NO_READ;
		return pte;
	}

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}

static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
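
/*
 * Usage sketch (hypothetical caller, not part of this file): an IOMMU
 * driver would typically service iommu_map() by handing a whole run of
 * pages to one call, e.g.
 *
 *	size_t mapped = 0;
 *	ret = ops->map_pages(ops, iova, paddr, SZ_4K, 16,
 *			     IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
 *
 * which installs up to sixteen level-3 PTEs in a single walk and
 * reports the bytes actually installed back through 'mapped'.
 */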

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
	return arm_lpae_map_pages(ops, iova, paddr, size, 1, iommu_prot, gfp,
				  NULL);
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep, size_t pgcount)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
	int i, unmap_idx_start = -1, num_entries = 0, max_entries;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz) {
		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
		max_entries = ptes_per_table - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
	}

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx_start >= 0) {
		for (i = 0; i < num_entries; i++)
			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

		return num_entries * size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}
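
/*
 * Worked example for arm_lpae_split_blk_unmap() (illustrative):
 * unmapping one 4K page from the middle of an existing 2M block
 * allocates a level-3 table, populates it with page entries recreating
 * the old mapping minus the 4K being removed, then atomically swaps it
 * in over the block PTE via arm_lpae_install_table(). If another CPU
 * won that swap, the bytes are unmapped from the winner's table via
 * the final __arm_lpae_unmap() call instead.
 */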

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);

		while (i < num_entries) {
			pte = READ_ONCE(*ptep);
			if (WARN_ON(!pte))
				break;

			__arm_lpae_clear_pte(ptep, &iop->cfg);

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
			}

			ptep++;
			i++;
		}

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep, pgcount);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	return arm_lpae_unmap_pages(ops, iova, size, 1, gather);
}
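
/*
 * Illustrative note: a short return from arm_lpae_unmap_pages() is
 * legitimate. If pgcount entries straddle a table boundary, the walk
 * stops at the end of the current table and reports only the bytes it
 * did unmap; the IOMMU core is then expected to call back in for the
 * remainder.
 */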

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.map_pages	= arm_lpae_map_pages,
		.unmap		= arm_lpae_unmap,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
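
/*
 * Worked example for the geometry above (illustrative): ias == 48 with
 * a 4K granule gives pg_shift == 12, bits_per_level == 9 and
 * va_bits == 36, so levels == 4, start_level == 0 and
 * pgd_bits == 36 - 3 * 9 == 9, i.e. a single 4K level-0 table. A
 * 39-bit IAS instead yields levels == 3 and start_level == 1.
 */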

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
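
/*
 * Illustrative note: tsz above is the architectural T0SZ/T1SZ value,
 * so a 48-bit IAS programs TxSZ == 16. This function only fills in the
 * TCR/MAIR/TTBR fields of the config; the driver that owns the SMMU is
 * still responsible for the actual register writes.
 */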

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
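
/*
 * Worked example for the PGD concatenation above (illustrative): a
 * 40-bit IAS with a 4K granule would normally need a 4-level walk
 * (va_bits == 28, start_level == 0) with only two level-0 entries.
 * Since 2 <= ARM_LPAE_S2_MAX_CONCAT_PAGES, the walk is shortened to
 * start at level 1 with two concatenated 4K tables forming an 8K pgd,
 * and SL0 is encoded from the new start level accordingly.
 */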

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;
	int i;

	if (cfg->oas > 36)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * The table format itself always uses two levels, but the total VA
	 * space is mapped by four separate tables, making the MMIO registers
	 * an effective "level 1". For simplicity, though, we treat this
	 * equivalently to LPAE stage 2 concatenation at level 2, with the
	 * additional TTBRs each just pointing at consecutive pages.
	 */
	if (data->start_level < 1)
		goto out_free_data;
	if (data->start_level == 1 && data->pgd_bits > 2)
		goto out_free_data;
	if (data->start_level > 1)
		data->pgd_bits = 0;
	data->start_level = 2;
	cfg->apple_dart_cfg.n_ttbrs = 1 << data->pgd_bits;
	data->pgd_bits += data->bits_per_level;

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i)
		cfg->apple_dart_cfg.ttbr[i] =
			virt_to_phys(data->pgd + i * ARM_LPAE_GRANULE(data));

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
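
/*
 * Worked example for the DART TTBR trick above (illustrative,
 * hypothetical numbers): a 38-bit IAS with 16K pages gives
 * bits_per_level == 11, va_bits == 24, start_level == 1 and
 * pgd_bits == 2, so n_ttbrs == 4 and the "pgd" becomes four
 * consecutive 16K tables, one per TTBR, walked as if they were
 * level-2 concatenated.
 */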

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = {
	.alloc	= apple_dart_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE, GFP_KERNEL))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif