// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
 * No bombay mix was harmed in the writing of this file.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#include <linux/bitfield.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>


#define KVM_PTE_TYPE                    BIT(1)
#define KVM_PTE_TYPE_BLOCK              0
#define KVM_PTE_TYPE_PAGE               1
#define KVM_PTE_TYPE_TABLE              1

#define KVM_PTE_LEAF_ATTR_LO            GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP      GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO   3
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW   1
#define KVM_PTE_LEAF_ATTR_LO_S1_SH      GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS   3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF      BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R  BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W  BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH      GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS   3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF      BIT(10)

#define KVM_PTE_LEAF_ATTR_HI            GENMASK(63, 51)

#define KVM_PTE_LEAF_ATTR_HI_SW         GENMASK(58, 55)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN      BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN      BIT(54)

#define KVM_PTE_LEAF_ATTR_S2_PERMS      (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
                                         KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
                                         KVM_PTE_LEAF_ATTR_HI_S2_XN)

#define KVM_INVALID_PTE_OWNER_MASK      GENMASK(9, 2)
#define KVM_MAX_OWNER_ID                1

/*
 * Used to indicate a pte for which a 'break-before-make' sequence is in
 * progress.
 */
#define KVM_INVALID_PTE_LOCKED          BIT(10)

struct kvm_pgtable_walk_data {
        struct kvm_pgtable_walker       *walker;

        const u64                       start;
        u64                             addr;
        const u64                       end;
};

static bool kvm_phys_is_valid(u64 phys)
{
        return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
}

static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
{
        u64 granule = kvm_granule_size(ctx->level);

        if (!kvm_level_supports_block_mapping(ctx->level))
                return false;

        if (granule > (ctx->end - ctx->addr))
                return false;

        if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
                return false;

        return IS_ALIGNED(ctx->addr, granule);
}

static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
{
        u64 shift = kvm_granule_shift(level);
        u64 mask = BIT(PAGE_SHIFT - 3) - 1;

        return (data->addr >> shift) & mask;
}

static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
        u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
        u64 mask = BIT(pgt->ia_bits) - 1;

        return (addr & mask) >> shift;
}

static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
{
        struct kvm_pgtable pgt = {
                .ia_bits        = ia_bits,
                .start_level    = start_level,
        };

        return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}

static bool kvm_pte_table(kvm_pte_t pte, u32 level)
{
        if (level == KVM_PGTABLE_MAX_LEVELS - 1)
                return false;

        if (!kvm_pte_valid(pte))
                return false;

        return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
        return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}
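/*
 * Worked example (illustrative, not part of the original source): with 4KiB
 * pages (PAGE_SHIFT == 12), kvm_granule_shift() evaluates to 39/30/21/12 for
 * levels 0/1/2/3, so a level-2 leaf covers a 2MiB block and a level-3 leaf
 * covers a single 4KiB page. kvm_block_mapping_supported() additionally
 * requires that the remaining walk still spans a whole granule and that the
 * address (and a valid physical address) are aligned to that granule.
 */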
static void kvm_clear_pte(kvm_pte_t *ptep)
{
        WRITE_ONCE(*ptep, 0);
}

static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
{
        kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));

        pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
        pte |= KVM_PTE_VALID;
        return pte;
}

static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
{
        kvm_pte_t pte = kvm_phys_to_pte(pa);
        u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
                                                           KVM_PTE_TYPE_BLOCK;

        pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
        pte |= FIELD_PREP(KVM_PTE_TYPE, type);
        pte |= KVM_PTE_VALID;

        return pte;
}

static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
{
        return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
}

static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data,
                                  const struct kvm_pgtable_visit_ctx *ctx,
                                  enum kvm_pgtable_walk_flags visit)
{
        struct kvm_pgtable_walker *walker = data->walker;

        /* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */
        WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
        return walker->cb(ctx, visit);
}
static bool kvm_pgtable_walk_continue(const struct kvm_pgtable_walker *walker,
                                      int r)
{
        /*
         * Visitor callbacks return EAGAIN when the conditions that led to a
         * fault are no longer reflected in the page tables due to a race to
         * update a PTE. In the context of a fault handler this is interpreted
         * as a signal to retry guest execution.
         *
         * Ignore the return code altogether for walkers outside a fault handler
         * (e.g. write protecting a range of memory) and chug along with the
         * page table walk.
         */
        if (r == -EAGAIN)
                return !(walker->flags & KVM_PGTABLE_WALK_HANDLE_FAULT);

        return !r;
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
                              struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level);

static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
                                      struct kvm_pgtable_mm_ops *mm_ops,
                                      kvm_pteref_t pteref, u32 level)
{
        enum kvm_pgtable_walk_flags flags = data->walker->flags;
        kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
        struct kvm_pgtable_visit_ctx ctx = {
                .ptep   = ptep,
                .old    = READ_ONCE(*ptep),
                .arg    = data->walker->arg,
                .mm_ops = mm_ops,
                .start  = data->start,
                .addr   = data->addr,
                .end    = data->end,
                .level  = level,
                .flags  = flags,
        };
        int ret = 0;
        kvm_pteref_t childp;
        bool table = kvm_pte_table(ctx.old, level);

        if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE))
                ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);

        if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
                ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
                ctx.old = READ_ONCE(*ptep);
                table = kvm_pte_table(ctx.old, level);
        }

        if (!kvm_pgtable_walk_continue(data->walker, ret))
                goto out;

        if (!table) {
                data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
                data->addr += kvm_granule_size(level);
                goto out;
        }

        childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
        ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
        if (!kvm_pgtable_walk_continue(data->walker, ret))
                goto out;

        if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
                ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);

out:
        if (kvm_pgtable_walk_continue(data->walker, ret))
                return 0;

        return ret;
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
                              struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level)
{
        u32 idx;
        int ret = 0;

        if (WARN_ON_ONCE(level >= KVM_PGTABLE_MAX_LEVELS))
                return -EINVAL;

        for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
                kvm_pteref_t pteref = &pgtable[idx];

                if (data->addr >= data->end)
                        break;

                ret = __kvm_pgtable_visit(data, mm_ops, pteref, level);
                if (ret)
                        break;
        }

        return ret;
}

static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
{
        u32 idx;
        int ret = 0;
        u64 limit = BIT(pgt->ia_bits);

        if (data->addr > limit || data->end > limit)
                return -ERANGE;

        if (!pgt->pgd)
                return -EINVAL;

        for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
                kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];

                ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level);
                if (ret)
                        break;
        }

        return ret;
}

int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
                     struct kvm_pgtable_walker *walker)
{
        struct kvm_pgtable_walk_data walk_data = {
                .start  = ALIGN_DOWN(addr, PAGE_SIZE),
                .addr   = ALIGN_DOWN(addr, PAGE_SIZE),
                .end    = PAGE_ALIGN(walk_data.addr + size),
                .walker = walker,
        };
        int r;

        r = kvm_pgtable_walk_begin(walker);
        if (r)
                return r;

        r = _kvm_pgtable_walk(pgt, &walk_data);
        kvm_pgtable_walk_end(walker);

        return r;
}
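/*
 * Illustrative sketch of how a caller drives the walker API; the in-tree
 * example of this pattern is kvm_pgtable_get_leaf() just below.
 * 'count_valid_walker' and 'nr_valid' are hypothetical names used only for
 * this sketch:
 *
 *      static int count_valid_walker(const struct kvm_pgtable_visit_ctx *ctx,
 *                                    enum kvm_pgtable_walk_flags visit)
 *      {
 *              u64 *nr_valid = ctx->arg;
 *
 *              if (kvm_pte_valid(ctx->old))
 *                      (*nr_valid)++;
 *              return 0;
 *      }
 *
 *      u64 nr_valid = 0;
 *      struct kvm_pgtable_walker walker = {
 *              .cb     = count_valid_walker,
 *              .flags  = KVM_PGTABLE_WALK_LEAF,
 *              .arg    = &nr_valid,
 *      };
 *
 *      kvm_pgtable_walk(pgt, addr, size, &walker);
 */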
struct leaf_walk_data {
        kvm_pte_t       pte;
        u32             level;
};

static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
                       enum kvm_pgtable_walk_flags visit)
{
        struct leaf_walk_data *data = ctx->arg;

        data->pte   = ctx->old;
        data->level = ctx->level;

        return 0;
}

int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
                         kvm_pte_t *ptep, u32 *level)
{
        struct leaf_walk_data data;
        struct kvm_pgtable_walker walker = {
                .cb     = leaf_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF,
                .arg    = &data,
        };
        int ret;

        ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
                               PAGE_SIZE, &walker);
        if (!ret) {
                if (ptep)
                        *ptep  = data.pte;
                if (level)
                        *level = data.level;
        }

        return ret;
}

struct hyp_map_data {
        const u64               phys;
        kvm_pte_t               attr;
};

static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
{
        bool device = prot & KVM_PGTABLE_PROT_DEVICE;
        u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
        kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
        u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
        u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
                                               KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;

        if (!(prot & KVM_PGTABLE_PROT_R))
                return -EINVAL;

        if (prot & KVM_PGTABLE_PROT_X) {
                if (prot & KVM_PGTABLE_PROT_W)
                        return -EINVAL;

                if (device)
                        return -EINVAL;
        } else {
                attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
        }

        attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
        attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
        attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
        attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
        *ptep = attr;

        return 0;
}

enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
{
        enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
        u32 ap;

        if (!kvm_pte_valid(pte))
                return prot;

        if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_XN))
                prot |= KVM_PGTABLE_PROT_X;

        ap = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_AP, pte);
        if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RO)
                prot |= KVM_PGTABLE_PROT_R;
        else if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RW)
                prot |= KVM_PGTABLE_PROT_RW;

        return prot;
}

static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
                                    struct hyp_map_data *data)
{
        u64 phys = data->phys + (ctx->addr - ctx->start);
        kvm_pte_t new;

        if (!kvm_block_mapping_supported(ctx, phys))
                return false;

        new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
        if (ctx->old == new)
                return true;
        if (!kvm_pte_valid(ctx->old))
                ctx->mm_ops->get_page(ctx->ptep);
        else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
                return false;

        smp_store_release(ctx->ptep, new);
        return true;
}

static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
                          enum kvm_pgtable_walk_flags visit)
{
        kvm_pte_t *childp, new;
        struct hyp_map_data *data = ctx->arg;
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

        if (hyp_map_walker_try_leaf(ctx, data))
                return 0;

        if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
                return -EINVAL;

        childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
        if (!childp)
                return -ENOMEM;

        new = kvm_init_table_pte(childp, mm_ops);
        mm_ops->get_page(ctx->ptep);
        smp_store_release(ctx->ptep, new);

        return 0;
}

int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
                        enum kvm_pgtable_prot prot)
{
        int ret;
        struct hyp_map_data map_data = {
                .phys   = ALIGN_DOWN(phys, PAGE_SIZE),
        };
        struct kvm_pgtable_walker walker = {
                .cb     = hyp_map_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF,
                .arg    = &map_data,
        };

        ret = hyp_set_prot_attr(prot, &map_data.attr);
        if (ret)
                return ret;

        ret = kvm_pgtable_walk(pgt, addr, size, &walker);
        dsb(ishst);
        isb();
        return ret;
}
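/*
 * Example usage (illustrative only): mapping a single page of normal memory
 * into the hyp stage-1 as read-write, non-executable. 'hyp_va' and 'pa' are
 * hypothetical placeholders for a hypervisor VA and its backing physical
 * address:
 *
 *      ret = kvm_pgtable_hyp_map(pgt, hyp_va, PAGE_SIZE, pa,
 *                                KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);
 *
 * hyp_set_prot_attr() rejects writable+executable, executable device and
 * non-readable combinations with -EINVAL before any entries are touched.
 */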
static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
                            enum kvm_pgtable_walk_flags visit)
{
        kvm_pte_t *childp = NULL;
        u64 granule = kvm_granule_size(ctx->level);
        u64 *unmapped = ctx->arg;
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

        if (!kvm_pte_valid(ctx->old))
                return -EINVAL;

        if (kvm_pte_table(ctx->old, ctx->level)) {
                childp = kvm_pte_follow(ctx->old, mm_ops);

                if (mm_ops->page_count(childp) != 1)
                        return 0;

                kvm_clear_pte(ctx->ptep);
                dsb(ishst);
                __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
        } else {
                if (ctx->end - ctx->addr < granule)
                        return -EINVAL;

                kvm_clear_pte(ctx->ptep);
                dsb(ishst);
                __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
                *unmapped += granule;
        }

        dsb(ish);
        isb();
        mm_ops->put_page(ctx->ptep);

        if (childp)
                mm_ops->put_page(childp);

        return 0;
}

u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
        u64 unmapped = 0;
        struct kvm_pgtable_walker walker = {
                .cb     = hyp_unmap_walker,
                .arg    = &unmapped,
                .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
        };

        if (!pgt->mm_ops->page_count)
                return 0;

        kvm_pgtable_walk(pgt, addr, size, &walker);
        return unmapped;
}

int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
                         struct kvm_pgtable_mm_ops *mm_ops)
{
        u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);

        pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
        if (!pgt->pgd)
                return -ENOMEM;

        pgt->ia_bits            = va_bits;
        pgt->start_level        = KVM_PGTABLE_MAX_LEVELS - levels;
        pgt->mm_ops             = mm_ops;
        pgt->mmu                = NULL;
        pgt->force_pte_cb       = NULL;

        return 0;
}

static int hyp_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
                           enum kvm_pgtable_walk_flags visit)
{
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

        if (!kvm_pte_valid(ctx->old))
                return 0;

        mm_ops->put_page(ctx->ptep);

        if (kvm_pte_table(ctx->old, ctx->level))
                mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));

        return 0;
}

void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
{
        struct kvm_pgtable_walker walker = {
                .cb     = hyp_free_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
        };

        WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
        pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
        pgt->pgd = NULL;
}

struct stage2_map_data {
        const u64                       phys;
        kvm_pte_t                       attr;
        u8                              owner_id;

        kvm_pte_t                       *anchor;
        kvm_pte_t                       *childp;

        struct kvm_s2_mmu               *mmu;
        void                            *memcache;

        /* Force mappings to page granularity */
        bool                            force_pte;
};
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
{
        u64 vtcr = VTCR_EL2_FLAGS;
        u8 lvls;

        vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
        vtcr |= VTCR_EL2_T0SZ(phys_shift);
        /*
         * Use a minimum 2 level page table to prevent splitting
         * host PMD huge pages at stage2.
         */
        lvls = stage2_pgtable_levels(phys_shift);
        if (lvls < 2)
                lvls = 2;
        vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

#ifdef CONFIG_ARM64_HW_AFDBM
        /*
         * Enable the Hardware Access Flag management, unconditionally
         * on all CPUs. In systems that have asymmetric support for the feature
         * this allows KVM to leverage hardware support on the subset of cores
         * that implement the feature.
         *
         * The architecture requires VTCR_EL2.HA to be RES0 (thus ignored by
         * hardware) on implementations that do not advertise support for the
         * feature. As such, setting HA unconditionally is safe, unless you
         * happen to be running on a design that has unadvertised support for
         * HAFDBS. Here be dragons.
         */
        if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
                vtcr |= VTCR_EL2_HA;
#endif /* CONFIG_ARM64_HW_AFDBM */

        /* Set the vmid bits */
        vtcr |= (get_vmid_bits(mmfr1) == 16) ?
                VTCR_EL2_VS_16BIT :
                VTCR_EL2_VS_8BIT;

        return vtcr;
}

static bool stage2_has_fwb(struct kvm_pgtable *pgt)
{
        if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
                return false;

        return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
}

#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))

static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
                                kvm_pte_t *ptep)
{
        bool device = prot & KVM_PGTABLE_PROT_DEVICE;
        kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
                                  KVM_S2_MEMATTR(pgt, NORMAL);
        u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;

        if (!(prot & KVM_PGTABLE_PROT_X))
                attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
        else if (device)
                return -EINVAL;

        if (prot & KVM_PGTABLE_PROT_R)
                attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

        if (prot & KVM_PGTABLE_PROT_W)
                attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

        attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
        attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
        attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
        *ptep = attr;

        return 0;
}

enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
{
        enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;

        if (!kvm_pte_valid(pte))
                return prot;

        if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R)
                prot |= KVM_PGTABLE_PROT_R;
        if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
                prot |= KVM_PGTABLE_PROT_W;
        if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
                prot |= KVM_PGTABLE_PROT_X;

        return prot;
}
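/*
 * Worked example (illustrative): for normal memory with prot ==
 * KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W, stage2_set_prot_attr() builds
 *
 *      KVM_S2_MEMATTR(pgt, NORMAL) | KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R |
 *      KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | KVM_PTE_LEAF_ATTR_HI_S2_XN |
 *      FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, KVM_PTE_LEAF_ATTR_LO_S2_SH_IS) |
 *      KVM_PTE_LEAF_ATTR_LO_S2_AF,
 *
 * and kvm_pgtable_stage2_pte_prot() recovers exactly R | W (and not X, since
 * XN is set) from a valid PTE carrying those attributes.
 */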
static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
        if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
                return true;

        return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
}

static bool stage2_pte_is_counted(kvm_pte_t pte)
{
        /*
         * The refcount tracks valid entries as well as invalid entries if they
         * encode ownership of a page to another entity than the page-table
         * owner, whose id is 0.
         */
        return !!pte;
}

static bool stage2_pte_is_locked(kvm_pte_t pte)
{
        return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED);
}

static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
{
        if (!kvm_pgtable_walk_shared(ctx)) {
                WRITE_ONCE(*ctx->ptep, new);
                return true;
        }

        return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
}

/**
 * stage2_try_break_pte() - Invalidates a pte according to the
 *                          'break-before-make' requirements of the
 *                          architecture.
 *
 * @ctx: context of the visited pte.
 * @mmu: stage-2 mmu
 *
 * Returns: true if the pte was successfully broken.
 *
 * If the removed pte was valid, performs the necessary serialization and TLB
 * invalidation for the old value. For counted ptes, drops the reference count
 * on the containing table page.
 */
static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
                                 struct kvm_s2_mmu *mmu)
{
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

        if (stage2_pte_is_locked(ctx->old)) {
                /*
                 * Should never occur if this walker has exclusive access to the
                 * page tables.
                 */
                WARN_ON(!kvm_pgtable_walk_shared(ctx));
                return false;
        }

        if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
                return false;

        /*
         * Perform the appropriate TLB invalidation based on the evicted pte
         * value (if any).
         */
        if (kvm_pte_table(ctx->old, ctx->level))
                kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
        else if (kvm_pte_valid(ctx->old))
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);

        if (stage2_pte_is_counted(ctx->old))
                mm_ops->put_page(ctx->ptep);

        return true;
}

static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
{
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

        WARN_ON(!stage2_pte_is_locked(*ctx->ptep));

        if (stage2_pte_is_counted(new))
                mm_ops->get_page(ctx->ptep);

        smp_store_release(ctx->ptep, new);
}
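/*
 * A minimal sketch (not a new API) of how the two helpers above are meant to
 * be paired, mirroring stage2_map_walker_try_leaf() further down, with 'ctx',
 * 'mmu' and 'new' as used there:
 *
 *      if (!stage2_try_break_pte(ctx, mmu))
 *              return -EAGAIN;         // raced with another walker
 *
 *      // The PTE now reads as KVM_INVALID_PTE_LOCKED and the TLBs have been
 *      // invalidated for the old mapping; any CMOs against 'new' go here.
 *
 *      stage2_make_pte(ctx, new);      // publish with release semantics
 */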
static void stage2_put_pte(const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu,
                           struct kvm_pgtable_mm_ops *mm_ops)
{
        /*
         * Clear the existing PTE, and perform break-before-make with
         * TLB maintenance if it was valid.
         */
        if (kvm_pte_valid(ctx->old)) {
                kvm_clear_pte(ctx->ptep);
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
        }

        mm_ops->put_page(ctx->ptep);
}

static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
{
        u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
        return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
}

static bool stage2_pte_executable(kvm_pte_t pte)
{
        return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
}

static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
                                       const struct stage2_map_data *data)
{
        u64 phys = data->phys;

        /*
         * Stage-2 walks to update ownership data are communicated to the map
         * walker using an invalid PA. Avoid offsetting an already invalid PA,
         * which could overflow and make the address valid again.
         */
        if (!kvm_phys_is_valid(phys))
                return phys;

        /*
         * Otherwise, work out the correct PA based on how far the walk has
         * gotten.
         */
        return phys + (ctx->addr - ctx->start);
}

static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
                                        struct stage2_map_data *data)
{
        u64 phys = stage2_map_walker_phys_addr(ctx, data);

        if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
                return false;

        return kvm_block_mapping_supported(ctx, phys);
}

static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
                                      struct stage2_map_data *data)
{
        kvm_pte_t new;
        u64 phys = stage2_map_walker_phys_addr(ctx, data);
        u64 granule = kvm_granule_size(ctx->level);
        struct kvm_pgtable *pgt = data->mmu->pgt;
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

        if (!stage2_leaf_mapping_allowed(ctx, data))
                return -E2BIG;

        if (kvm_phys_is_valid(phys))
                new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
        else
                new = kvm_init_invalid_leaf_owner(data->owner_id);

        /*
         * Skip updating the PTE if we are trying to recreate the exact
         * same mapping or only change the access permissions. Instead,
         * the vCPU will exit one more time from guest if still needed
         * and then go through the path of relaxing permissions.
         */
        if (!stage2_pte_needs_update(ctx->old, new))
                return -EAGAIN;

        if (!stage2_try_break_pte(ctx, data->mmu))
                return -EAGAIN;

        /* Perform CMOs before installation of the guest stage-2 PTE */
        if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
                mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
                                               granule);

        if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
                mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);

        stage2_make_pte(ctx, new);

        return 0;
}

static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
                                     struct stage2_map_data *data)
{
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
        kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
        int ret;

        if (!stage2_leaf_mapping_allowed(ctx, data))
                return 0;

        ret = stage2_map_walker_try_leaf(ctx, data);
        if (ret)
                return ret;

        mm_ops->free_removed_table(childp, ctx->level);
        return 0;
}

static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
                                struct stage2_map_data *data)
{
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
        kvm_pte_t *childp, new;
        int ret;

        ret = stage2_map_walker_try_leaf(ctx, data);
        if (ret != -E2BIG)
                return ret;

        if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
                return -EINVAL;

        if (!data->memcache)
                return -ENOMEM;

        childp = mm_ops->zalloc_page(data->memcache);
        if (!childp)
                return -ENOMEM;

        if (!stage2_try_break_pte(ctx, data->mmu)) {
                mm_ops->put_page(childp);
                return -EAGAIN;
        }

        /*
         * If we've run into an existing block mapping then replace it with
         * a table. Accesses beyond 'end' that fall within the new table
         * will be mapped lazily.
         */
        new = kvm_init_table_pte(childp, mm_ops);
        stage2_make_pte(ctx, new);

        return 0;
}
/*
 * The TABLE_PRE callback runs for table entries on the way down, looking
 * for table entries which we could conceivably replace with a block entry
 * for this mapping. If it finds one it replaces the entry and calls
 * kvm_pgtable_mm_ops::free_removed_table() to tear down the detached table.
 *
 * Otherwise, the LEAF callback performs the mapping at the existing leaves
 * instead.
 */
static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
                             enum kvm_pgtable_walk_flags visit)
{
        struct stage2_map_data *data = ctx->arg;

        switch (visit) {
        case KVM_PGTABLE_WALK_TABLE_PRE:
                return stage2_map_walk_table_pre(ctx, data);
        case KVM_PGTABLE_WALK_LEAF:
                return stage2_map_walk_leaf(ctx, data);
        default:
                return -EINVAL;
        }
}

int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
                           u64 phys, enum kvm_pgtable_prot prot,
                           void *mc, enum kvm_pgtable_walk_flags flags)
{
        int ret;
        struct stage2_map_data map_data = {
                .phys           = ALIGN_DOWN(phys, PAGE_SIZE),
                .mmu            = pgt->mmu,
                .memcache       = mc,
                .force_pte      = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
        };
        struct kvm_pgtable_walker walker = {
                .cb             = stage2_map_walker,
                .flags          = flags |
                                  KVM_PGTABLE_WALK_TABLE_PRE |
                                  KVM_PGTABLE_WALK_LEAF,
                .arg            = &map_data,
        };

        if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
                return -EINVAL;

        ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
        if (ret)
                return ret;

        ret = kvm_pgtable_walk(pgt, addr, size, &walker);
        dsb(ishst);
        return ret;
}

int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
                                 void *mc, u8 owner_id)
{
        int ret;
        struct stage2_map_data map_data = {
                .phys           = KVM_PHYS_INVALID,
                .mmu            = pgt->mmu,
                .memcache       = mc,
                .owner_id       = owner_id,
                .force_pte      = true,
        };
        struct kvm_pgtable_walker walker = {
                .cb             = stage2_map_walker,
                .flags          = KVM_PGTABLE_WALK_TABLE_PRE |
                                  KVM_PGTABLE_WALK_LEAF,
                .arg            = &map_data,
        };

        if (owner_id > KVM_MAX_OWNER_ID)
                return -EINVAL;

        ret = kvm_pgtable_walk(pgt, addr, size, &walker);
        return ret;
}
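/*
 * Example usage (illustrative only), as a stage-2 fault handler might issue
 * it: install a 4KiB read/write mapping at IPA 'ipa' backed by physical
 * address 'pa', with a pre-topped-up memcache 'mc' ('ipa', 'pa' and 'mc' are
 * hypothetical placeholders):
 *
 *      ret = kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE, pa,
 *                                   KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W,
 *                                   mc, KVM_PGTABLE_WALK_HANDLE_FAULT |
 *                                   KVM_PGTABLE_WALK_SHARED);
 *
 * With KVM_PGTABLE_WALK_HANDLE_FAULT set, -EAGAIN indicates a benign race
 * (or an already-matching mapping) and is normally resolved by re-entering
 * the guest; see kvm_pgtable_walk_continue().
 */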
static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
                               enum kvm_pgtable_walk_flags visit)
{
        struct kvm_pgtable *pgt = ctx->arg;
        struct kvm_s2_mmu *mmu = pgt->mmu;
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
        kvm_pte_t *childp = NULL;
        bool need_flush = false;

        if (!kvm_pte_valid(ctx->old)) {
                if (stage2_pte_is_counted(ctx->old)) {
                        kvm_clear_pte(ctx->ptep);
                        mm_ops->put_page(ctx->ptep);
                }
                return 0;
        }

        if (kvm_pte_table(ctx->old, ctx->level)) {
                childp = kvm_pte_follow(ctx->old, mm_ops);

                if (mm_ops->page_count(childp) != 1)
                        return 0;
        } else if (stage2_pte_cacheable(pgt, ctx->old)) {
                need_flush = !stage2_has_fwb(pgt);
        }

        /*
         * This is similar to the map() path in that we unmap the entire
         * block entry and rely on the remaining portions being faulted
         * back lazily.
         */
        stage2_put_pte(ctx, mmu, mm_ops);

        if (need_flush && mm_ops->dcache_clean_inval_poc)
                mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
                                               kvm_granule_size(ctx->level));

        if (childp)
                mm_ops->put_page(childp);

        return 0;
}

int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
        struct kvm_pgtable_walker walker = {
                .cb     = stage2_unmap_walker,
                .arg    = pgt,
                .flags  = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
        };

        return kvm_pgtable_walk(pgt, addr, size, &walker);
}

struct stage2_attr_data {
        kvm_pte_t                       attr_set;
        kvm_pte_t                       attr_clr;
        kvm_pte_t                       pte;
        u32                             level;
};

static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
                              enum kvm_pgtable_walk_flags visit)
{
        kvm_pte_t pte = ctx->old;
        struct stage2_attr_data *data = ctx->arg;
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

        if (!kvm_pte_valid(ctx->old))
                return -EAGAIN;

        data->level = ctx->level;
        data->pte = pte;
        pte &= ~data->attr_clr;
        pte |= data->attr_set;

        /*
         * We may race with the CPU trying to set the access flag here,
         * but worst-case the access flag update gets lost and will be
         * set on the next access instead.
         */
        if (data->pte != pte) {
                /*
                 * Invalidate instruction cache before updating the guest
                 * stage-2 PTE if we are going to add executable permission.
                 */
                if (mm_ops->icache_inval_pou &&
                    stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old))
                        mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
                                                 kvm_granule_size(ctx->level));

                if (!stage2_try_set_pte(ctx, pte))
                        return -EAGAIN;
        }

        return 0;
}

static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
                                    u64 size, kvm_pte_t attr_set,
                                    kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
                                    u32 *level, enum kvm_pgtable_walk_flags flags)
{
        int ret;
        kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
        struct stage2_attr_data data = {
                .attr_set       = attr_set & attr_mask,
                .attr_clr       = attr_clr & attr_mask,
        };
        struct kvm_pgtable_walker walker = {
                .cb             = stage2_attr_walker,
                .arg            = &data,
                .flags          = flags | KVM_PGTABLE_WALK_LEAF,
        };

        ret = kvm_pgtable_walk(pgt, addr, size, &walker);
        if (ret)
                return ret;

        if (orig_pte)
                *orig_pte = data.pte;

        if (level)
                *level = data.level;
        return 0;
}

int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
        return stage2_update_leaf_attrs(pgt, addr, size, 0,
                                        KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
                                        NULL, NULL, 0);
}

kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
{
        kvm_pte_t pte = 0;
        int ret;

        ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
                                       &pte, NULL,
                                       KVM_PGTABLE_WALK_HANDLE_FAULT |
                                       KVM_PGTABLE_WALK_SHARED);
        if (!ret)
                dsb(ishst);

        return pte;
}

kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
{
        kvm_pte_t pte = 0;
        stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
                                 &pte, NULL, 0);
        /*
         * "But where's the TLBI?!", you scream.
         * "Over in the core code", I sigh.
         *
         * See the '->clear_flush_young()' callback on the KVM mmu notifier.
         */
        return pte;
}

bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
{
        kvm_pte_t pte = 0;
        stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0);
        return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
}
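/*
 * Ageing flow (illustrative summary): the core KVM MMU notifiers clear the
 * access flag with kvm_pgtable_stage2_mkold() and later test it with
 * kvm_pgtable_stage2_is_young(). The flag is set again either by hardware,
 * when VTCR_EL2.HA is enabled in kvm_get_vtcr(), or by
 * kvm_pgtable_stage2_mkyoung() from the access-fault path.
 */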
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
                                   enum kvm_pgtable_prot prot)
{
        int ret;
        u32 level;
        kvm_pte_t set = 0, clr = 0;

        if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
                return -EINVAL;

        if (prot & KVM_PGTABLE_PROT_R)
                set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

        if (prot & KVM_PGTABLE_PROT_W)
                set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

        if (prot & KVM_PGTABLE_PROT_X)
                clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;

        ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
                                       KVM_PGTABLE_WALK_HANDLE_FAULT |
                                       KVM_PGTABLE_WALK_SHARED);
        if (!ret)
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);
        return ret;
}

static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
                               enum kvm_pgtable_walk_flags visit)
{
        struct kvm_pgtable *pgt = ctx->arg;
        struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;

        if (!kvm_pte_valid(ctx->old) || !stage2_pte_cacheable(pgt, ctx->old))
                return 0;

        if (mm_ops->dcache_clean_inval_poc)
                mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
                                               kvm_granule_size(ctx->level));
        return 0;
}

int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
        struct kvm_pgtable_walker walker = {
                .cb     = stage2_flush_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF,
                .arg    = pgt,
        };

        if (stage2_has_fwb(pgt))
                return 0;

        return kvm_pgtable_walk(pgt, addr, size, &walker);
}


int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
                              struct kvm_pgtable_mm_ops *mm_ops,
                              enum kvm_pgtable_stage2_flags flags,
                              kvm_pgtable_force_pte_cb_t force_pte_cb)
{
        size_t pgd_sz;
        u64 vtcr = mmu->arch->vtcr;
        u32 ia_bits = VTCR_EL2_IPA(vtcr);
        u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
        u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

        pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
        pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
        if (!pgt->pgd)
                return -ENOMEM;

        pgt->ia_bits            = ia_bits;
        pgt->start_level        = start_level;
        pgt->mm_ops             = mm_ops;
        pgt->mmu                = mmu;
        pgt->flags              = flags;
        pgt->force_pte_cb       = force_pte_cb;

        /* Ensure zeroed PGD pages are visible to the hardware walker */
        dsb(ishst);
        return 0;
}

size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
{
        u32 ia_bits = VTCR_EL2_IPA(vtcr);
        u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
        u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

        return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
}

static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
                              enum kvm_pgtable_walk_flags visit)
{
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

        if (!stage2_pte_is_counted(ctx->old))
                return 0;

        mm_ops->put_page(ctx->ptep);

        if (kvm_pte_table(ctx->old, ctx->level))
                mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));

        return 0;
}
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
{
        size_t pgd_sz;
        struct kvm_pgtable_walker walker = {
                .cb     = stage2_free_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF |
                          KVM_PGTABLE_WALK_TABLE_POST,
        };

        WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
        pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
        pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
        pgt->pgd = NULL;
}

void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
{
        kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
        struct kvm_pgtable_walker walker = {
                .cb     = stage2_free_walker,
                .flags  = KVM_PGTABLE_WALK_LEAF |
                          KVM_PGTABLE_WALK_TABLE_POST,
        };
        struct kvm_pgtable_walk_data data = {
                .walker = &walker,

                /*
                 * At this point the IPA really doesn't matter, as the page
                 * table being traversed has already been removed from the stage
                 * 2. Set an appropriate range to cover the entire page table.
                 */
                .addr   = 0,
                .end    = kvm_granule_size(level),
        };

        WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
}