/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

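/*
 * Worked example (illustrative only, not used by the code): with a 4K
 * granule (pg_shift = 12, bits_per_level = 9) and levels = 4, the walk
 * starts at level 0 and ARM_LPAE_LVL_SHIFT gives 39/30/21/12 for levels
 * 0/1/2/3, so ARM_LPAE_LVL_IDX(a,l,d) extracts successive 9-bit index
 * fields of a 48-bit input address.
 */
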
/*
 * Calculate the block/page mapping size at level l for pagetable in d.
 */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

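/*
 * Illustrative check: a MAIR composed from the three attributes above,
 * with NC at index 0, WBRWA at index 1 and DEVICE at index 2, works out
 * to 0x0004ff44 (each attribute occupies one byte per its index).
 */
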
/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!selftest_running)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!selftest_running)
		dma_sync_single_for_device(cfg->iommu_dev,
					   __arm_lpae_dma_addr(ptep),
					   sizeof(pte), DMA_TO_DEVICE);
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep);

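/*
 * Illustrative note: pfn_to_iopte() and iopte_to_pfn() are exact inverses
 * for any output address below ARM_LPAE_MAX_ADDR_BITS. For example, with
 * a 4K granule, paddr 0x40201000 becomes pfn 0x40201, which packs back
 * into the PTE output-address field as 0x40201000.
 */
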
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	if (iopte_leaf(*ptep, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, cfg);
	return 0;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
					       GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else if (!iopte_leaf(pte, lvl)) {
		cptep = iopte_deref(pte, data);
	} else {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;

		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

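/*
 * Worked example (illustrative): for a stage-1, last-level mapping of
 * paddr 0x40201000 with IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE,
 * arm_lpae_prot_to_pte() yields nG | AP_UNPRIV | ATTRINDX(1), and
 * arm_lpae_init_pte() then adds TYPE_PAGE | AF | SH_IS plus the output
 * address, for a final PTE of 0x40201f47.
 */
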
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

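/*
 * Typical usage from an IOMMU driver (illustrative sketch; "iova", "paddr"
 * and "ops" are hypothetical):
 *
 *	if (ops->map(ops, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE))
 *		...handle failure...
 *
 * A 2M request with a 4K granule lands as a single level-2 block entry
 * rather than 512 level-3 page entries, since the size check in
 * __arm_lpae_map() matches the level-2 block size first.
 */
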
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, &data->iop.cfg);
	iova &= ~(blk_size - 1);
	io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
	return size;
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						 ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		io_pgtable_tlb_sync(&data->iop);

	return unmapped;
}

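/*
 * Illustrative note: unmapping a single 4K page out of an existing 2M
 * block takes the arm_lpae_split_blk_unmap() path above. The block entry
 * is replaced with a next-level table that remaps the remaining 511 pages
 * with the original attributes and simply omits the one being removed.
 */
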
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

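/*
 * Worked example (illustrative): for ias = 48 and a 4K granule,
 * pg_shift = 12 and bits_per_level = 9, so va_bits = 36, levels = 4,
 * pgd_bits = 36 - 9 * 3 = 9 and pgd_size = 1 << (9 + 3) = 4K: a single
 * granule-sized level-0 table.
 */
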
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

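/*
 * Illustrative check: for a 4K granule with ias = oas = 48, the TCR built
 * above is SH_IS | IRGN/ORGN WBWA | TG0_4K | IPS 48-bit | T0SZ 16 | EPD1,
 * i.e. 0x500803510.
 */
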
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	      (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

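/*
 * Illustrative usage from an IOMMU driver (sketch only; "smmu_gather_ops",
 * "smmu" and "smmu_domain" are hypothetical):
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.tlb		= &smmu_gather_ops,
 *		.iommu_dev	= smmu->dev,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, smmu_domain);
 *
 * The init_fns above are invoked by alloc_io_pgtable_ops() via the
 * io-pgtable core; on success, cfg.arm_lpae_s1_cfg holds the TCR, MAIR
 * and TTBR values to program into the hardware.
 */
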
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

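/*
 * The dummy TLB callbacks above let the table code run without any
 * hardware behind it: they only assert that the cookie comes back intact
 * and that every flush request is for a size in the supported bitmap.
 */
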
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif