1 /* 2 * CPU-agnostic ARM page table allocator. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License version 2 as 6 * published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * GNU General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public License 14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 * 16 * Copyright (C) 2014 ARM Limited 17 * 18 * Author: Will Deacon <will.deacon@arm.com> 19 */ 20 21 #define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt 22 23 #include <linux/iommu.h> 24 #include <linux/kernel.h> 25 #include <linux/sizes.h> 26 #include <linux/slab.h> 27 #include <linux/types.h> 28 29 #include <asm/barrier.h> 30 31 #include "io-pgtable.h" 32 33 #define ARM_LPAE_MAX_ADDR_BITS 48 34 #define ARM_LPAE_S2_MAX_CONCAT_PAGES 16 35 #define ARM_LPAE_MAX_LEVELS 4 36 37 /* Struct accessors */ 38 #define io_pgtable_to_data(x) \ 39 container_of((x), struct arm_lpae_io_pgtable, iop) 40 41 #define io_pgtable_ops_to_pgtable(x) \ 42 container_of((x), struct io_pgtable, ops) 43 44 #define io_pgtable_ops_to_data(x) \ 45 io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) 46 47 /* 48 * For consistency with the architecture, we always consider 49 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0 50 */ 51 #define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels) 52 53 /* 54 * Calculate the right shift amount to get to the portion describing level l 55 * in a virtual address mapped by the pagetable in d. 56 */ 57 #define ARM_LPAE_LVL_SHIFT(l,d) \ 58 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ 59 * (d)->bits_per_level) + (d)->pg_shift) 60 61 #define ARM_LPAE_PAGES_PER_PGD(d) \ 62 DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) 63 64 /* 65 * Calculate the index at level l used to map virtual address a using the 66 * pagetable in d. 67 */ 68 #define ARM_LPAE_PGD_IDX(l,d) \ 69 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) 70 71 #define ARM_LPAE_LVL_IDX(a,l,d) \ 72 (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ 73 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) 74 75 /* Calculate the block/page mapping size at level l for pagetable in d. */ 76 #define ARM_LPAE_BLOCK_SIZE(l,d) \ 77 (1 << (ilog2(sizeof(arm_lpae_iopte)) + \ 78 ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) 79 80 /* Page table bits */ 81 #define ARM_LPAE_PTE_TYPE_SHIFT 0 82 #define ARM_LPAE_PTE_TYPE_MASK 0x3 83 84 #define ARM_LPAE_PTE_TYPE_BLOCK 1 85 #define ARM_LPAE_PTE_TYPE_TABLE 3 86 #define ARM_LPAE_PTE_TYPE_PAGE 3 87 88 #define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63) 89 #define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53) 90 #define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10) 91 #define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8) 92 #define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8) 93 #define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8) 94 #define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5) 95 #define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0) 96 97 #define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2) 98 /* Ignore the contiguous bit for block splitting */ 99 #define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52) 100 #define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \ 101 ARM_LPAE_PTE_ATTR_HI_MASK) 102 103 /* Stage-1 PTE */ 104 #define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) 105 #define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6) 106 #define ARM_LPAE_PTE_ATTRINDX_SHIFT 2 107 #define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11) 108 109 /* Stage-2 PTE */ 110 #define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6) 111 #define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6) 112 #define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6) 113 #define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2) 114 #define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2) 115 #define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2) 116 117 /* Register bits */ 118 #define ARM_32_LPAE_TCR_EAE (1 << 31) 119 #define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) 120 121 #define ARM_LPAE_TCR_EPD1 (1 << 23) 122 123 #define ARM_LPAE_TCR_TG0_4K (0 << 14) 124 #define ARM_LPAE_TCR_TG0_64K (1 << 14) 125 #define ARM_LPAE_TCR_TG0_16K (2 << 14) 126 127 #define ARM_LPAE_TCR_SH0_SHIFT 12 128 #define ARM_LPAE_TCR_SH0_MASK 0x3 129 #define ARM_LPAE_TCR_SH_NS 0 130 #define ARM_LPAE_TCR_SH_OS 2 131 #define ARM_LPAE_TCR_SH_IS 3 132 133 #define ARM_LPAE_TCR_ORGN0_SHIFT 10 134 #define ARM_LPAE_TCR_IRGN0_SHIFT 8 135 #define ARM_LPAE_TCR_RGN_MASK 0x3 136 #define ARM_LPAE_TCR_RGN_NC 0 137 #define ARM_LPAE_TCR_RGN_WBWA 1 138 #define ARM_LPAE_TCR_RGN_WT 2 139 #define ARM_LPAE_TCR_RGN_WB 3 140 141 #define ARM_LPAE_TCR_SL0_SHIFT 6 142 #define ARM_LPAE_TCR_SL0_MASK 0x3 143 144 #define ARM_LPAE_TCR_T0SZ_SHIFT 0 145 #define ARM_LPAE_TCR_SZ_MASK 0xf 146 147 #define ARM_LPAE_TCR_PS_SHIFT 16 148 #define ARM_LPAE_TCR_PS_MASK 0x7 149 150 #define ARM_LPAE_TCR_IPS_SHIFT 32 151 #define ARM_LPAE_TCR_IPS_MASK 0x7 152 153 #define ARM_LPAE_TCR_PS_32_BIT 0x0ULL 154 #define ARM_LPAE_TCR_PS_36_BIT 0x1ULL 155 #define ARM_LPAE_TCR_PS_40_BIT 0x2ULL 156 #define ARM_LPAE_TCR_PS_42_BIT 0x3ULL 157 #define ARM_LPAE_TCR_PS_44_BIT 0x4ULL 158 #define ARM_LPAE_TCR_PS_48_BIT 0x5ULL 159 160 #define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) 161 #define ARM_LPAE_MAIR_ATTR_MASK 0xff 162 #define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 163 #define ARM_LPAE_MAIR_ATTR_NC 0x44 164 #define ARM_LPAE_MAIR_ATTR_WBRWA 0xff 165 #define ARM_LPAE_MAIR_ATTR_IDX_NC 0 166 #define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 167 #define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 168 169 /* IOPTE accessors */ 170 #define iopte_deref(pte,d) \ 171 (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \ 172 & ~((1ULL << (d)->pg_shift) - 1))) 173 174 #define iopte_type(pte,l) \ 175 (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) 176 177 #define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK) 178 179 #define iopte_leaf(pte,l) \ 180 (l == (ARM_LPAE_MAX_LEVELS - 1) ? \ 181 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \ 182 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK)) 183 184 #define iopte_to_pfn(pte,d) \ 185 (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift) 186 187 #define pfn_to_iopte(pfn,d) \ 188 (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) 189 190 struct arm_lpae_io_pgtable { 191 struct io_pgtable iop; 192 193 int levels; 194 size_t pgd_size; 195 unsigned long pg_shift; 196 unsigned long bits_per_level; 197 198 void *pgd; 199 }; 200 201 typedef u64 arm_lpae_iopte; 202 203 static bool selftest_running = false; 204 205 static dma_addr_t __arm_lpae_dma_addr(void *pages) 206 { 207 return (dma_addr_t)virt_to_phys(pages); 208 } 209 210 static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, 211 struct io_pgtable_cfg *cfg) 212 { 213 struct device *dev = cfg->iommu_dev; 214 dma_addr_t dma; 215 void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO); 216 217 if (!pages) 218 return NULL; 219 220 if (!selftest_running) { 221 dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE); 222 if (dma_mapping_error(dev, dma)) 223 goto out_free; 224 /* 225 * We depend on the IOMMU being able to work with any physical 226 * address directly, so if the DMA layer suggests otherwise by 227 * translating or truncating them, that bodes very badly... 228 */ 229 if (dma != virt_to_phys(pages)) 230 goto out_unmap; 231 } 232 233 return pages; 234 235 out_unmap: 236 dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n"); 237 dma_unmap_single(dev, dma, size, DMA_TO_DEVICE); 238 out_free: 239 free_pages_exact(pages, size); 240 return NULL; 241 } 242 243 static void __arm_lpae_free_pages(void *pages, size_t size, 244 struct io_pgtable_cfg *cfg) 245 { 246 if (!selftest_running) 247 dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), 248 size, DMA_TO_DEVICE); 249 free_pages_exact(pages, size); 250 } 251 252 static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, 253 struct io_pgtable_cfg *cfg) 254 { 255 *ptep = pte; 256 257 if (!selftest_running) 258 dma_sync_single_for_device(cfg->iommu_dev, 259 __arm_lpae_dma_addr(ptep), 260 sizeof(pte), DMA_TO_DEVICE); 261 } 262 263 static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, 264 unsigned long iova, size_t size, int lvl, 265 arm_lpae_iopte *ptep); 266 267 static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, 268 unsigned long iova, phys_addr_t paddr, 269 arm_lpae_iopte prot, int lvl, 270 arm_lpae_iopte *ptep) 271 { 272 arm_lpae_iopte pte = prot; 273 struct io_pgtable_cfg *cfg = &data->iop.cfg; 274 275 if (iopte_leaf(*ptep, lvl)) { 276 /* We require an unmap first */ 277 WARN_ON(!selftest_running); 278 return -EEXIST; 279 } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { 280 /* 281 * We need to unmap and free the old table before 282 * overwriting it with a block entry. 283 */ 284 arm_lpae_iopte *tblp; 285 size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); 286 287 tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); 288 if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz)) 289 return -EINVAL; 290 } 291 292 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) 293 pte |= ARM_LPAE_PTE_NS; 294 295 if (lvl == ARM_LPAE_MAX_LEVELS - 1) 296 pte |= ARM_LPAE_PTE_TYPE_PAGE; 297 else 298 pte |= ARM_LPAE_PTE_TYPE_BLOCK; 299 300 pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; 301 pte |= pfn_to_iopte(paddr >> data->pg_shift, data); 302 303 __arm_lpae_set_pte(ptep, pte, cfg); 304 return 0; 305 } 306 307 static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, 308 phys_addr_t paddr, size_t size, arm_lpae_iopte prot, 309 int lvl, arm_lpae_iopte *ptep) 310 { 311 arm_lpae_iopte *cptep, pte; 312 size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); 313 struct io_pgtable_cfg *cfg = &data->iop.cfg; 314 315 /* Find our entry at the current level */ 316 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); 317 318 /* If we can install a leaf entry at this level, then do so */ 319 if (size == block_size && (size & cfg->pgsize_bitmap)) 320 return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); 321 322 /* We can't allocate tables at the final level */ 323 if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1)) 324 return -EINVAL; 325 326 /* Grab a pointer to the next level */ 327 pte = *ptep; 328 if (!pte) { 329 cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift, 330 GFP_ATOMIC, cfg); 331 if (!cptep) 332 return -ENOMEM; 333 334 pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE; 335 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) 336 pte |= ARM_LPAE_PTE_NSTABLE; 337 __arm_lpae_set_pte(ptep, pte, cfg); 338 } else { 339 cptep = iopte_deref(pte, data); 340 } 341 342 /* Rinse, repeat */ 343 return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); 344 } 345 346 static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, 347 int prot) 348 { 349 arm_lpae_iopte pte; 350 351 if (data->iop.fmt == ARM_64_LPAE_S1 || 352 data->iop.fmt == ARM_32_LPAE_S1) { 353 pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG; 354 355 if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) 356 pte |= ARM_LPAE_PTE_AP_RDONLY; 357 358 if (prot & IOMMU_CACHE) 359 pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE 360 << ARM_LPAE_PTE_ATTRINDX_SHIFT); 361 } else { 362 pte = ARM_LPAE_PTE_HAP_FAULT; 363 if (prot & IOMMU_READ) 364 pte |= ARM_LPAE_PTE_HAP_READ; 365 if (prot & IOMMU_WRITE) 366 pte |= ARM_LPAE_PTE_HAP_WRITE; 367 if (prot & IOMMU_CACHE) 368 pte |= ARM_LPAE_PTE_MEMATTR_OIWB; 369 else 370 pte |= ARM_LPAE_PTE_MEMATTR_NC; 371 } 372 373 if (prot & IOMMU_NOEXEC) 374 pte |= ARM_LPAE_PTE_XN; 375 376 return pte; 377 } 378 379 static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, 380 phys_addr_t paddr, size_t size, int iommu_prot) 381 { 382 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); 383 arm_lpae_iopte *ptep = data->pgd; 384 int ret, lvl = ARM_LPAE_START_LVL(data); 385 arm_lpae_iopte prot; 386 387 /* If no access, then nothing to do */ 388 if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) 389 return 0; 390 391 prot = arm_lpae_prot_to_pte(data, iommu_prot); 392 ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); 393 /* 394 * Synchronise all PTE updates for the new mapping before there's 395 * a chance for anything to kick off a table walk for the new iova. 396 */ 397 wmb(); 398 399 return ret; 400 } 401 402 static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, 403 arm_lpae_iopte *ptep) 404 { 405 arm_lpae_iopte *start, *end; 406 unsigned long table_size; 407 408 /* Only leaf entries at the last level */ 409 if (lvl == ARM_LPAE_MAX_LEVELS - 1) 410 return; 411 412 if (lvl == ARM_LPAE_START_LVL(data)) 413 table_size = data->pgd_size; 414 else 415 table_size = 1UL << data->pg_shift; 416 417 start = ptep; 418 end = (void *)ptep + table_size; 419 420 while (ptep != end) { 421 arm_lpae_iopte pte = *ptep++; 422 423 if (!pte || iopte_leaf(pte, lvl)) 424 continue; 425 426 __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); 427 } 428 429 __arm_lpae_free_pages(start, table_size, &data->iop.cfg); 430 } 431 432 static void arm_lpae_free_pgtable(struct io_pgtable *iop) 433 { 434 struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); 435 436 __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd); 437 kfree(data); 438 } 439 440 static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, 441 unsigned long iova, size_t size, 442 arm_lpae_iopte prot, int lvl, 443 arm_lpae_iopte *ptep, size_t blk_size) 444 { 445 unsigned long blk_start, blk_end; 446 phys_addr_t blk_paddr; 447 arm_lpae_iopte table = 0; 448 struct io_pgtable_cfg *cfg = &data->iop.cfg; 449 450 blk_start = iova & ~(blk_size - 1); 451 blk_end = blk_start + blk_size; 452 blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift; 453 454 for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { 455 arm_lpae_iopte *tablep; 456 457 /* Unmap! */ 458 if (blk_start == iova) 459 continue; 460 461 /* __arm_lpae_map expects a pointer to the start of the table */ 462 tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data); 463 if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl, 464 tablep) < 0) { 465 if (table) { 466 /* Free the table we allocated */ 467 tablep = iopte_deref(table, data); 468 __arm_lpae_free_pgtable(data, lvl + 1, tablep); 469 } 470 return 0; /* Bytes unmapped */ 471 } 472 } 473 474 __arm_lpae_set_pte(ptep, table, cfg); 475 iova &= ~(blk_size - 1); 476 cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie); 477 return size; 478 } 479 480 static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, 481 unsigned long iova, size_t size, int lvl, 482 arm_lpae_iopte *ptep) 483 { 484 arm_lpae_iopte pte; 485 const struct iommu_gather_ops *tlb = data->iop.cfg.tlb; 486 void *cookie = data->iop.cookie; 487 size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); 488 489 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); 490 pte = *ptep; 491 492 /* Something went horribly wrong and we ran out of page table */ 493 if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS))) 494 return 0; 495 496 /* If the size matches this level, we're in the right place */ 497 if (size == blk_size) { 498 __arm_lpae_set_pte(ptep, 0, &data->iop.cfg); 499 500 if (!iopte_leaf(pte, lvl)) { 501 /* Also flush any partial walks */ 502 tlb->tlb_add_flush(iova, size, false, cookie); 503 tlb->tlb_sync(cookie); 504 ptep = iopte_deref(pte, data); 505 __arm_lpae_free_pgtable(data, lvl + 1, ptep); 506 } else { 507 tlb->tlb_add_flush(iova, size, true, cookie); 508 } 509 510 return size; 511 } else if (iopte_leaf(pte, lvl)) { 512 /* 513 * Insert a table at the next level to map the old region, 514 * minus the part we want to unmap 515 */ 516 return arm_lpae_split_blk_unmap(data, iova, size, 517 iopte_prot(pte), lvl, ptep, 518 blk_size); 519 } 520 521 /* Keep on walkin' */ 522 ptep = iopte_deref(pte, data); 523 return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep); 524 } 525 526 static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, 527 size_t size) 528 { 529 size_t unmapped; 530 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); 531 struct io_pgtable *iop = &data->iop; 532 arm_lpae_iopte *ptep = data->pgd; 533 int lvl = ARM_LPAE_START_LVL(data); 534 535 unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep); 536 if (unmapped) 537 iop->cfg.tlb->tlb_sync(iop->cookie); 538 539 return unmapped; 540 } 541 542 static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, 543 unsigned long iova) 544 { 545 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); 546 arm_lpae_iopte pte, *ptep = data->pgd; 547 int lvl = ARM_LPAE_START_LVL(data); 548 549 do { 550 /* Valid IOPTE pointer? */ 551 if (!ptep) 552 return 0; 553 554 /* Grab the IOPTE we're interested in */ 555 pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data)); 556 557 /* Valid entry? */ 558 if (!pte) 559 return 0; 560 561 /* Leaf entry? */ 562 if (iopte_leaf(pte,lvl)) 563 goto found_translation; 564 565 /* Take it to the next level */ 566 ptep = iopte_deref(pte, data); 567 } while (++lvl < ARM_LPAE_MAX_LEVELS); 568 569 /* Ran out of page tables to walk */ 570 return 0; 571 572 found_translation: 573 iova &= ((1 << data->pg_shift) - 1); 574 return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova; 575 } 576 577 static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) 578 { 579 unsigned long granule; 580 581 /* 582 * We need to restrict the supported page sizes to match the 583 * translation regime for a particular granule. Aim to match 584 * the CPU page size if possible, otherwise prefer smaller sizes. 585 * While we're at it, restrict the block sizes to match the 586 * chosen granule. 587 */ 588 if (cfg->pgsize_bitmap & PAGE_SIZE) 589 granule = PAGE_SIZE; 590 else if (cfg->pgsize_bitmap & ~PAGE_MASK) 591 granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); 592 else if (cfg->pgsize_bitmap & PAGE_MASK) 593 granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); 594 else 595 granule = 0; 596 597 switch (granule) { 598 case SZ_4K: 599 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); 600 break; 601 case SZ_16K: 602 cfg->pgsize_bitmap &= (SZ_16K | SZ_32M); 603 break; 604 case SZ_64K: 605 cfg->pgsize_bitmap &= (SZ_64K | SZ_512M); 606 break; 607 default: 608 cfg->pgsize_bitmap = 0; 609 } 610 } 611 612 static struct arm_lpae_io_pgtable * 613 arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) 614 { 615 unsigned long va_bits, pgd_bits; 616 struct arm_lpae_io_pgtable *data; 617 618 arm_lpae_restrict_pgsizes(cfg); 619 620 if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K))) 621 return NULL; 622 623 if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS) 624 return NULL; 625 626 if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) 627 return NULL; 628 629 if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) { 630 dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n"); 631 return NULL; 632 } 633 634 data = kmalloc(sizeof(*data), GFP_KERNEL); 635 if (!data) 636 return NULL; 637 638 data->pg_shift = __ffs(cfg->pgsize_bitmap); 639 data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte)); 640 641 va_bits = cfg->ias - data->pg_shift; 642 data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level); 643 644 /* Calculate the actual size of our pgd (without concatenation) */ 645 pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); 646 data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); 647 648 data->iop.ops = (struct io_pgtable_ops) { 649 .map = arm_lpae_map, 650 .unmap = arm_lpae_unmap, 651 .iova_to_phys = arm_lpae_iova_to_phys, 652 }; 653 654 return data; 655 } 656 657 static struct io_pgtable * 658 arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) 659 { 660 u64 reg; 661 struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg); 662 663 if (!data) 664 return NULL; 665 666 /* TCR */ 667 reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | 668 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | 669 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); 670 671 switch (1 << data->pg_shift) { 672 case SZ_4K: 673 reg |= ARM_LPAE_TCR_TG0_4K; 674 break; 675 case SZ_16K: 676 reg |= ARM_LPAE_TCR_TG0_16K; 677 break; 678 case SZ_64K: 679 reg |= ARM_LPAE_TCR_TG0_64K; 680 break; 681 } 682 683 switch (cfg->oas) { 684 case 32: 685 reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT); 686 break; 687 case 36: 688 reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT); 689 break; 690 case 40: 691 reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT); 692 break; 693 case 42: 694 reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT); 695 break; 696 case 44: 697 reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT); 698 break; 699 case 48: 700 reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT); 701 break; 702 default: 703 goto out_free_data; 704 } 705 706 reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; 707 708 /* Disable speculative walks through TTBR1 */ 709 reg |= ARM_LPAE_TCR_EPD1; 710 cfg->arm_lpae_s1_cfg.tcr = reg; 711 712 /* MAIRs */ 713 reg = (ARM_LPAE_MAIR_ATTR_NC 714 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | 715 (ARM_LPAE_MAIR_ATTR_WBRWA 716 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | 717 (ARM_LPAE_MAIR_ATTR_DEVICE 718 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); 719 720 cfg->arm_lpae_s1_cfg.mair[0] = reg; 721 cfg->arm_lpae_s1_cfg.mair[1] = 0; 722 723 /* Looking good; allocate a pgd */ 724 data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg); 725 if (!data->pgd) 726 goto out_free_data; 727 728 /* Ensure the empty pgd is visible before any actual TTBR write */ 729 wmb(); 730 731 /* TTBRs */ 732 cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd); 733 cfg->arm_lpae_s1_cfg.ttbr[1] = 0; 734 return &data->iop; 735 736 out_free_data: 737 kfree(data); 738 return NULL; 739 } 740 741 static struct io_pgtable * 742 arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) 743 { 744 u64 reg, sl; 745 struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg); 746 747 if (!data) 748 return NULL; 749 750 /* 751 * Concatenate PGDs at level 1 if possible in order to reduce 752 * the depth of the stage-2 walk. 753 */ 754 if (data->levels == ARM_LPAE_MAX_LEVELS) { 755 unsigned long pgd_pages; 756 757 pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte)); 758 if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) { 759 data->pgd_size = pgd_pages << data->pg_shift; 760 data->levels--; 761 } 762 } 763 764 /* VTCR */ 765 reg = ARM_64_LPAE_S2_TCR_RES1 | 766 (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | 767 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | 768 (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); 769 770 sl = ARM_LPAE_START_LVL(data); 771 772 switch (1 << data->pg_shift) { 773 case SZ_4K: 774 reg |= ARM_LPAE_TCR_TG0_4K; 775 sl++; /* SL0 format is different for 4K granule size */ 776 break; 777 case SZ_16K: 778 reg |= ARM_LPAE_TCR_TG0_16K; 779 break; 780 case SZ_64K: 781 reg |= ARM_LPAE_TCR_TG0_64K; 782 break; 783 } 784 785 switch (cfg->oas) { 786 case 32: 787 reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT); 788 break; 789 case 36: 790 reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT); 791 break; 792 case 40: 793 reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT); 794 break; 795 case 42: 796 reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT); 797 break; 798 case 44: 799 reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT); 800 break; 801 case 48: 802 reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT); 803 break; 804 default: 805 goto out_free_data; 806 } 807 808 reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; 809 reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT; 810 cfg->arm_lpae_s2_cfg.vtcr = reg; 811 812 /* Allocate pgd pages */ 813 data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg); 814 if (!data->pgd) 815 goto out_free_data; 816 817 /* Ensure the empty pgd is visible before any actual TTBR write */ 818 wmb(); 819 820 /* VTTBR */ 821 cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd); 822 return &data->iop; 823 824 out_free_data: 825 kfree(data); 826 return NULL; 827 } 828 829 static struct io_pgtable * 830 arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) 831 { 832 struct io_pgtable *iop; 833 834 if (cfg->ias > 32 || cfg->oas > 40) 835 return NULL; 836 837 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); 838 iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie); 839 if (iop) { 840 cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE; 841 cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff; 842 } 843 844 return iop; 845 } 846 847 static struct io_pgtable * 848 arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) 849 { 850 struct io_pgtable *iop; 851 852 if (cfg->ias > 40 || cfg->oas > 40) 853 return NULL; 854 855 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); 856 iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie); 857 if (iop) 858 cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff; 859 860 return iop; 861 } 862 863 struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { 864 .alloc = arm_64_lpae_alloc_pgtable_s1, 865 .free = arm_lpae_free_pgtable, 866 }; 867 868 struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = { 869 .alloc = arm_64_lpae_alloc_pgtable_s2, 870 .free = arm_lpae_free_pgtable, 871 }; 872 873 struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = { 874 .alloc = arm_32_lpae_alloc_pgtable_s1, 875 .free = arm_lpae_free_pgtable, 876 }; 877 878 struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = { 879 .alloc = arm_32_lpae_alloc_pgtable_s2, 880 .free = arm_lpae_free_pgtable, 881 }; 882 883 #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST 884 885 static struct io_pgtable_cfg *cfg_cookie; 886 887 static void dummy_tlb_flush_all(void *cookie) 888 { 889 WARN_ON(cookie != cfg_cookie); 890 } 891 892 static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf, 893 void *cookie) 894 { 895 WARN_ON(cookie != cfg_cookie); 896 WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); 897 } 898 899 static void dummy_tlb_sync(void *cookie) 900 { 901 WARN_ON(cookie != cfg_cookie); 902 } 903 904 static struct iommu_gather_ops dummy_tlb_ops __initdata = { 905 .tlb_flush_all = dummy_tlb_flush_all, 906 .tlb_add_flush = dummy_tlb_add_flush, 907 .tlb_sync = dummy_tlb_sync, 908 }; 909 910 static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) 911 { 912 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); 913 struct io_pgtable_cfg *cfg = &data->iop.cfg; 914 915 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", 916 cfg->pgsize_bitmap, cfg->ias); 917 pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n", 918 data->levels, data->pgd_size, data->pg_shift, 919 data->bits_per_level, data->pgd); 920 } 921 922 #define __FAIL(ops, i) ({ \ 923 WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \ 924 arm_lpae_dump_ops(ops); \ 925 selftest_running = false; \ 926 -EFAULT; \ 927 }) 928 929 static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) 930 { 931 static const enum io_pgtable_fmt fmts[] = { 932 ARM_64_LPAE_S1, 933 ARM_64_LPAE_S2, 934 }; 935 936 int i, j; 937 unsigned long iova; 938 size_t size; 939 struct io_pgtable_ops *ops; 940 941 selftest_running = true; 942 943 for (i = 0; i < ARRAY_SIZE(fmts); ++i) { 944 cfg_cookie = cfg; 945 ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); 946 if (!ops) { 947 pr_err("selftest: failed to allocate io pgtable ops\n"); 948 return -ENOMEM; 949 } 950 951 /* 952 * Initial sanity checks. 953 * Empty page tables shouldn't provide any translations. 954 */ 955 if (ops->iova_to_phys(ops, 42)) 956 return __FAIL(ops, i); 957 958 if (ops->iova_to_phys(ops, SZ_1G + 42)) 959 return __FAIL(ops, i); 960 961 if (ops->iova_to_phys(ops, SZ_2G + 42)) 962 return __FAIL(ops, i); 963 964 /* 965 * Distinct mappings of different granule sizes. 966 */ 967 iova = 0; 968 j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); 969 while (j != BITS_PER_LONG) { 970 size = 1UL << j; 971 972 if (ops->map(ops, iova, iova, size, IOMMU_READ | 973 IOMMU_WRITE | 974 IOMMU_NOEXEC | 975 IOMMU_CACHE)) 976 return __FAIL(ops, i); 977 978 /* Overlapping mappings */ 979 if (!ops->map(ops, iova, iova + size, size, 980 IOMMU_READ | IOMMU_NOEXEC)) 981 return __FAIL(ops, i); 982 983 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) 984 return __FAIL(ops, i); 985 986 iova += SZ_1G; 987 j++; 988 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); 989 } 990 991 /* Partial unmap */ 992 size = 1UL << __ffs(cfg->pgsize_bitmap); 993 if (ops->unmap(ops, SZ_1G + size, size) != size) 994 return __FAIL(ops, i); 995 996 /* Remap of partial unmap */ 997 if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) 998 return __FAIL(ops, i); 999 1000 if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) 1001 return __FAIL(ops, i); 1002 1003 /* Full unmap */ 1004 iova = 0; 1005 j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); 1006 while (j != BITS_PER_LONG) { 1007 size = 1UL << j; 1008 1009 if (ops->unmap(ops, iova, size) != size) 1010 return __FAIL(ops, i); 1011 1012 if (ops->iova_to_phys(ops, iova + 42)) 1013 return __FAIL(ops, i); 1014 1015 /* Remap full block */ 1016 if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) 1017 return __FAIL(ops, i); 1018 1019 if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) 1020 return __FAIL(ops, i); 1021 1022 iova += SZ_1G; 1023 j++; 1024 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); 1025 } 1026 1027 free_io_pgtable_ops(ops); 1028 } 1029 1030 selftest_running = false; 1031 return 0; 1032 } 1033 1034 static int __init arm_lpae_do_selftests(void) 1035 { 1036 static const unsigned long pgsize[] = { 1037 SZ_4K | SZ_2M | SZ_1G, 1038 SZ_16K | SZ_32M, 1039 SZ_64K | SZ_512M, 1040 }; 1041 1042 static const unsigned int ias[] = { 1043 32, 36, 40, 42, 44, 48, 1044 }; 1045 1046 int i, j, pass = 0, fail = 0; 1047 struct io_pgtable_cfg cfg = { 1048 .tlb = &dummy_tlb_ops, 1049 .oas = 48, 1050 }; 1051 1052 for (i = 0; i < ARRAY_SIZE(pgsize); ++i) { 1053 for (j = 0; j < ARRAY_SIZE(ias); ++j) { 1054 cfg.pgsize_bitmap = pgsize[i]; 1055 cfg.ias = ias[j]; 1056 pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n", 1057 pgsize[i], ias[j]); 1058 if (arm_lpae_run_tests(&cfg)) 1059 fail++; 1060 else 1061 pass++; 1062 } 1063 } 1064 1065 pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail); 1066 return fail ? -EFAULT : 0; 1067 } 1068 subsys_initcall(arm_lpae_do_selftests); 1069 #endif 1070