// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_ras.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/virt.h>

#include "trace.h"

static struct kvm_pgtable *hyp_pgtable;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

static unsigned long io_map_base;


/*
 * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
 * we may see kernel panics with CONFIG_DETECT_HUNG_TASK,
 * CONFIG_LOCKUP_DETECTOR, CONFIG_LOCKDEP. Additionally, holding the lock too
 * long will also starve other vCPUs. We also have to make sure that the page
 * tables are not freed while we release the lock.
 */
static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
			      phys_addr_t end,
			      int (*fn)(struct kvm_pgtable *, u64, u64),
			      bool resched)
{
	int ret;
	u64 next;

	do {
		struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
		if (!pgt)
			return -EINVAL;

		next = stage2_pgd_addr_end(kvm, addr, end);
		ret = fn(pgt, addr, next - addr);
		if (ret)
			break;

		if (resched && next != end)
			cond_resched_lock(&kvm->mmu_lock);
	} while (addr = next, addr != end);

	return ret;
}

#define stage2_apply_range_resched(kvm, addr, end, fn)			\
	stage2_apply_range(kvm, addr, end, fn, true)

static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}

/**
 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
 * @kvm:	pointer to kvm structure.
 *
 * Interface to HYP function to flush all VM TLB entries
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	++kvm->stat.generic.remote_tlb_flush_requests;
	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
}

static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_is_map_memory(pfn);
}

static void *stage2_memcache_zalloc_page(void *arg)
{
	struct kvm_mmu_memory_cache *mc = arg;

	/* Allocated with __GFP_ZERO, so no need to zero */
	return kvm_mmu_memory_cache_alloc(mc);
}

static void *kvm_host_zalloc_pages_exact(size_t size)
{
	return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}

static void kvm_host_get_page(void *addr)
{
	get_page(virt_to_page(addr));
}

static void kvm_host_put_page(void *addr)
{
	put_page(virt_to_page(addr));
}

static int kvm_host_page_count(void *addr)
{
	return page_count(virt_to_page(addr));
}

static phys_addr_t kvm_host_pa(void *addr)
{
	return __pa(addr);
}

static void *kvm_host_va(phys_addr_t phys)
{
	return __va(phys);
}

static void clean_dcache_guest_page(void *va, size_t size)
{
	__clean_dcache_guest_page(va, size);
}

static void invalidate_icache_guest_page(void *va, size_t size)
{
	__invalidate_icache_guest_page(va, size);
}

/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM.  However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we flush to make sure the IO subsystem will
 * never hit in the cache.
 *
 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
 * we then fully enforce cacheability of RAM, no matter what the guest
 * does.
 */
/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @mmu:   The KVM stage-2 MMU pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 * @may_block: Whether or not we are permitted to block
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
				 bool may_block)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	phys_addr_t end = start + size;

	assert_spin_locked(&kvm->mmu_lock);
	WARN_ON(size & ~PAGE_MASK);
	WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
				   may_block));
}

static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
{
	__unmap_stage2_range(mmu, start, size, true);
}

static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;

	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
static void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx, bkt;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, bkt, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 */
void free_hyp_pgds(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);
	if (hyp_pgtable) {
		kvm_pgtable_hyp_destroy(hyp_pgtable);
		kfree(hyp_pgtable);
		hyp_pgtable = NULL;
	}
	mutex_unlock(&kvm_hyp_pgd_mutex);
}

static bool kvm_host_owns_hyp_mappings(void)
{
	if (is_kernel_in_hyp_mode())
		return false;

	if (static_branch_likely(&kvm_protected_mode_initialized))
		return false;

	/*
	 * This can happen at boot time when __create_hyp_mappings() is called
	 * after the hyp protection has been enabled, but the static key has
	 * not been flipped yet.
	 */
	if (!hyp_pgtable && is_protected_kvm_enabled())
		return false;

	WARN_ON(!hyp_pgtable);

	return true;
}

static int __create_hyp_mappings(unsigned long start, unsigned long size,
				 unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	if (WARN_ON(!kvm_host_owns_hyp_mappings()))
		return -EINVAL;

	mutex_lock(&kvm_hyp_pgd_mutex);
	err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
	mutex_unlock(&kvm_hyp_pgd_mutex);

	return err;
}

static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}

struct hyp_shared_pfn {
	u64 pfn;
	int count;
	struct rb_node node;
};

static DEFINE_MUTEX(hyp_shared_pfns_lock);
static struct rb_root hyp_shared_pfns = RB_ROOT;

static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
					      struct rb_node **parent)
{
	struct hyp_shared_pfn *this;

	*node = &hyp_shared_pfns.rb_node;
	*parent = NULL;
	while (**node) {
		this = container_of(**node, struct hyp_shared_pfn, node);
		*parent = **node;
		if (this->pfn < pfn)
			*node = &((**node)->rb_left);
		else if (this->pfn > pfn)
			*node = &((**node)->rb_right);
		else
			return this;
	}

	return NULL;
}

static int share_pfn_hyp(u64 pfn)
{
	struct rb_node **node, *parent;
	struct hyp_shared_pfn *this;
	int ret = 0;

	mutex_lock(&hyp_shared_pfns_lock);
	this = find_shared_pfn(pfn, &node, &parent);
	if (this) {
		this->count++;
		goto unlock;
	}

	this = kzalloc(sizeof(*this), GFP_KERNEL);
	if (!this) {
		ret = -ENOMEM;
		goto unlock;
	}

	this->pfn = pfn;
	this->count = 1;
	rb_link_node(&this->node, parent, node);
	rb_insert_color(&this->node, &hyp_shared_pfns);
	ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
unlock:
	mutex_unlock(&hyp_shared_pfns_lock);

	return ret;
}

static int unshare_pfn_hyp(u64 pfn)
{
	struct rb_node **node, *parent;
	struct hyp_shared_pfn *this;
	int ret = 0;

	mutex_lock(&hyp_shared_pfns_lock);
	this = find_shared_pfn(pfn, &node, &parent);
	if (WARN_ON(!this)) {
		ret = -ENOENT;
		goto unlock;
	}

	this->count--;
	if (this->count)
		goto unlock;

	rb_erase(&this->node, &hyp_shared_pfns);
	kfree(this);
	ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
unlock:
	mutex_unlock(&hyp_shared_pfns_lock);

	return ret;
}

int kvm_share_hyp(void *from, void *to)
{
	phys_addr_t start, end, cur;
	u64 pfn;
	int ret;

	if (is_kernel_in_hyp_mode())
		return 0;

	/*
	 * The share hcall maps things in the 'fixed-offset' region of the hyp
	 * VA space, so we can only share physically contiguous data-structures
	 * for now.
	 */
	if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to))
		return -EINVAL;

	if (kvm_host_owns_hyp_mappings())
		return create_hyp_mappings(from, to, PAGE_HYP);

	start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
	end = PAGE_ALIGN(__pa(to));
	for (cur = start; cur < end; cur += PAGE_SIZE) {
		pfn = __phys_to_pfn(cur);
		ret = share_pfn_hyp(pfn);
		if (ret)
			return ret;
	}

	return 0;
}

void kvm_unshare_hyp(void *from, void *to)
{
	phys_addr_t start, end, cur;
	u64 pfn;

	if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
		return;

	start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
	end = PAGE_ALIGN(__pa(to));
	for (cur = start; cur < end; cur += PAGE_SIZE) {
		pfn = __phys_to_pfn(cur);
		WARN_ON(unshare_pfn_hyp(pfn));
	}
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 * @prot:	The protection to be applied to this range
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = kern_hyp_va((unsigned long)from);
	unsigned long end = kern_hyp_va((unsigned long)to);

	if (is_kernel_in_hyp_mode())
		return 0;

	if (!kvm_host_owns_hyp_mappings())
		return -EPERM;

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
					    prot);
		if (err)
			return err;
	}

	return 0;
}

static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
					unsigned long *haddr,
					enum kvm_pgtable_prot prot)
{
	unsigned long base;
	int ret = 0;

	if (!kvm_host_owns_hyp_mappings()) {
		base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
					 phys_addr, size, prot);
		if (IS_ERR_OR_NULL((void *)base))
			return PTR_ERR((void *)base);
		*haddr = base;

		return 0;
	}

	mutex_lock(&kvm_hyp_pgd_mutex);

	/*
	 * This assumes that we have enough space below the idmap
	 * page to allocate our VAs. If not, the check below will
	 * kick. A potential alternative would be to detect that
	 * overflow and switch to an allocation above the idmap.
	 *
	 * The allocated size is always a multiple of PAGE_SIZE.
	 */
	size = PAGE_ALIGN(size + offset_in_page(phys_addr));
	base = io_map_base - size;

	/*
	 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
	 * allocating the new area, as it would indicate we've
	 * overflowed the idmap/IO address range.
	 */
	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
		ret = -ENOMEM;
	else
		io_map_base = base;

	mutex_unlock(&kvm_hyp_pgd_mutex);

	if (ret)
		goto out;

	ret = __create_hyp_mappings(base, size, phys_addr, prot);
	if (ret)
		goto out;

	*haddr = base + offset_in_page(phys_addr);
out:
	return ret;
}

/**
 * create_hyp_io_mappings - Map IO into both kernel and HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @kaddr:	Kernel VA for this mapping
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr)
{
	unsigned long addr;
	int ret;

	if (is_protected_kvm_enabled())
		return -EPERM;

	*kaddr = ioremap(phys_addr, size);
	if (!*kaddr)
		return -ENOMEM;

	if (is_kernel_in_hyp_mode()) {
		*haddr = *kaddr;
		return 0;
	}

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_DEVICE);
	if (ret) {
		iounmap(*kaddr);
		*kaddr = NULL;
		*haddr = NULL;
		return ret;
	}

	*haddr = (void __iomem *)addr;
	return 0;
}

/**
 * create_hyp_exec_mappings - Map an executable range into HYP
 * @phys_addr:	The physical start address which gets mapped
 * @size:	Size of the region being mapped
 * @haddr:	HYP VA for this mapping
 */
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr)
{
	unsigned long addr;
	int ret;

	BUG_ON(is_kernel_in_hyp_mode());

	ret = __create_hyp_private_mapping(phys_addr, size,
					   &addr, PAGE_HYP_EXEC);
	if (ret) {
		*haddr = NULL;
		return ret;
	}

	*haddr = (void *)addr;
	return 0;
}

static struct kvm_pgtable_mm_ops kvm_user_mm_ops = {
	/* We shouldn't need any other callback to walk the PT */
	.phys_to_virt		= kvm_host_va,
};

static int get_user_mapping_size(struct kvm *kvm, u64 addr)
{
	struct kvm_pgtable pgt = {
		.pgd		= (kvm_pte_t *)kvm->mm->pgd,
		.ia_bits	= VA_BITS,
		.start_level	= (KVM_PGTABLE_MAX_LEVELS -
				   CONFIG_PGTABLE_LEVELS),
		.mm_ops		= &kvm_user_mm_ops,
	};
	kvm_pte_t pte = 0;	/* Keep GCC quiet... */
	u32 level = ~0;
	int ret;

	ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
	VM_BUG_ON(ret);
	VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
	VM_BUG_ON(!(pte & PTE_VALID));

	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
}

static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
	.zalloc_page		= stage2_memcache_zalloc_page,
	.zalloc_pages_exact	= kvm_host_zalloc_pages_exact,
	.free_pages_exact	= free_pages_exact,
	.get_page		= kvm_host_get_page,
	.put_page		= kvm_host_put_page,
	.page_count		= kvm_host_page_count,
	.phys_to_virt		= kvm_host_va,
	.virt_to_phys		= kvm_host_pa,
	.dcache_clean_inval_poc	= clean_dcache_guest_page,
	.icache_inval_pou	= invalidate_icache_guest_page,
};

/**
 * kvm_init_stage2_mmu - Initialise a S2 MMU structure
 * @kvm:	The pointer to the KVM structure
 * @mmu:	The pointer to the s2 MMU structure
 *
 * Allocates only the stage-2 HW PGD level table(s).
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
{
	int cpu, err;
	struct kvm_pgtable *pgt;

	if (mmu->pgt != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
	if (!pgt)
		return -ENOMEM;

	mmu->arch = &kvm->arch;
	err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
	if (err)
		goto out_free_pgtable;

	mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
	if (!mmu->last_vcpu_ran) {
		err = -ENOMEM;
		goto out_destroy_pgtable;
	}

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

	mmu->pgt = pgt;
	mmu->pgd_phys = __pa(pgt->pgd);
	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
	return 0;

out_destroy_pgtable:
	kvm_pgtable_stage2_destroy(pgt);
out_free_pgtable:
	kfree(pgt);
	return err;
}

static void stage2_unmap_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	hva_t hva = memslot->userspace_addr;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = PAGE_SIZE * memslot->npages;
	hva_t reg_end = hva + size;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we should
	 * unmap any of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma;
		hva_t vm_start, vm_end;

		vma = find_vma_intersection(current->mm, hva, reg_end);
		if (!vma)
			break;

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (!(vma->vm_flags & VM_PFNMAP)) {
			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
			unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
		}
		hva = vm_end;
	} while (hva < reg_end);
}

/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx, bkt;

	idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(current->mm);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, bkt, slots)
		stage2_unmap_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	mmap_read_unlock(current->mm);
	srcu_read_unlock(&kvm->srcu, idx);
}

void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	struct kvm_pgtable *pgt = NULL;

	spin_lock(&kvm->mmu_lock);
	pgt = mmu->pgt;
	if (pgt) {
		mmu->pgd_phys = 0;
		mmu->pgt = NULL;
		free_percpu(mmu->last_vcpu_ran);
	}
	spin_unlock(&kvm->mmu_lock);

	if (pgt) {
		kvm_pgtable_stage2_destroy(pgt);
		kfree(pgt);
	}
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 * @writable:   Whether or not to create a writable mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable)
{
	phys_addr_t addr;
	int ret = 0;
	struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
	struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
				     KVM_PGTABLE_PROT_R |
				     (writable ? KVM_PGTABLE_PROT_W : 0);

	if (is_protected_kvm_enabled())
		return -EPERM;

	size += offset_in_page(guest_ipa);
	guest_ipa &= PAGE_MASK;

	for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
		ret = kvm_mmu_topup_memory_cache(&cache,
						 kvm_mmu_cache_min_pages(kvm));
		if (ret)
			break;

		spin_lock(&kvm->mmu_lock);
		ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
					     &cache);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			break;

		pa += PAGE_SIZE;
	}

	kvm_mmu_free_memory_cache(&cache);
	return ret;
}

/**
 * stage2_wp_range() - write protect stage2 memory region range
 * @mmu:        The KVM stage-2 MMU pointer
 * @addr:	Start address of range
 * @end:	End address of range
 */
static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
}

/**
 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
 * @kvm:	The KVM pointer
 * @slot:	The memory slot to write protect
 *
 * Called to start logging dirty pages after memory region
 * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
 * all present PUD, PMD and PTEs are write protected in the memory region.
 * Afterwards read of dirty page log can be called.
 *
 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
 * serializing operations for VM memory regions.
 */
static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start, end;

	if (WARN_ON_ONCE(!memslot))
		return;

	start = memslot->base_gfn << PAGE_SHIFT;
	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(&kvm->arch.mmu, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);
}

/**
 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks bits set in mask write protects the associated pte's. Caller must
 * acquire kvm_mmu_lock.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	stage2_wp_range(&kvm->arch.mmu, start, end);
}

/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * dirty pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}

static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
					       unsigned long hva,
					       unsigned long map_size)
{
	gpa_t gpa_start;
	hva_t uaddr_start, uaddr_end;
	size_t size;

	/* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
	if (map_size == PAGE_SIZE)
		return true;

	size = memslot->npages * PAGE_SIZE;

	gpa_start = memslot->base_gfn << PAGE_SHIFT;

	uaddr_start = memslot->userspace_addr;
	uaddr_end = uaddr_start + size;

	/*
	 * Pages belonging to memslots that don't have the same alignment
	 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
	 * PMD/PUD entries, because we'll end up mapping the wrong pages.
	 *
	 * Consider a layout like the following:
	 *
	 *    memslot->userspace_addr:
	 *    +-----+--------------------+--------------------+---+
	 *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
	 *    +-----+--------------------+--------------------+---+
	 *
	 *    memslot->base_gfn << PAGE_SHIFT:
	 *      +---+--------------------+--------------------+-----+
	 *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
	 *      +---+--------------------+--------------------+-----+
	 *
	 * If we create those stage-2 blocks, we'll end up with this incorrect
	 * mapping:
	 *   d -> f
	 *   e -> g
	 *   f -> h
	 */
	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
		return false;

	/*
	 * Next, let's make sure we're not trying to map anything not covered
	 * by the memslot. This means we have to prohibit block size mappings
	 * for the beginning and end of a non-block aligned and non-block sized
	 * memory slot (illustrated by the head and tail parts of the
	 * userspace view above containing pages 'abcde' and 'xyz',
	 * respectively).
	 *
	 * Note that it doesn't matter if we do the check using the
	 * userspace_addr or the base_gfn, as both are equally aligned (per
	 * the check above) and equally sized.
	 */
	return (hva & ~(map_size - 1)) >= uaddr_start &&
	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}

/*
 * Check if the given hva is backed by a transparent huge page (THP) and
 * whether it can be mapped using block mapping in stage2. If so, adjust
 * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
 * supported. This will need to be updated to support other THP sizes.
 *
 * Returns the size of the mapping.
 */
static unsigned long
transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
			    unsigned long hva, kvm_pfn_t *pfnp,
			    phys_addr_t *ipap)
{
	kvm_pfn_t pfn = *pfnp;

	/*
	 * Make sure the adjustment is done only for THP pages. Also make
	 * sure that the HVA and IPA are sufficiently aligned and that the
	 * block map is contained within the memslot.
	 */
	if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
	    get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
		/*
		 * The address we faulted on is backed by a transparent huge
		 * page.  However, because we map the compound huge page and
		 * not the individual tail page, we need to transfer the
		 * refcount to the head page.  We have to be careful that the
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
		 * We are sure this doesn't happen, because mmu_notifier_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages, specifically
		 * before being able to call __split_huge_page_refcount().
		 *
		 * We can therefore safely transfer the refcount from PG_tail
		 * to PG_head and switch the pfn from a tail page to the head
		 * page accordingly.
		 */
		*ipap &= PMD_MASK;
		kvm_release_pfn_clean(pfn);
		pfn &= ~(PTRS_PER_PMD - 1);
		get_page(pfn_to_page(pfn));
		*pfnp = pfn;

		return PMD_SIZE;
	}

	/* Use page mapping if we cannot use block mapping. */
	return PAGE_SIZE;
}

static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
{
	unsigned long pa;

	if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP))
		return huge_page_shift(hstate_vma(vma));

	if (!(vma->vm_flags & VM_PFNMAP))
		return PAGE_SHIFT;

	VM_BUG_ON(is_vm_hugetlb_page(vma));

	pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);

#ifndef __PAGETABLE_PMD_FOLDED
	if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
	    ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
	    ALIGN(hva, PUD_SIZE) <= vma->vm_end)
		return PUD_SHIFT;
#endif

	if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
	    ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
	    ALIGN(hva, PMD_SIZE) <= vma->vm_end)
		return PMD_SHIFT;

	return PAGE_SHIFT;
}

/*
 * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be
 * able to see the page's tags and therefore they must be initialised first. If
 * PG_mte_tagged is set, tags have already been initialised.
 *
 * The race in the test/set of the PG_mte_tagged flag is handled by:
 * - preventing VM_SHARED mappings in a memslot with MTE preventing two VMs
 *   racing to sanitise the same page
 * - mmap_lock protects between a VM faulting a page in and the VMM performing
 *   an mprotect() to add VM_MTE
 */
static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
			     unsigned long size)
{
	unsigned long i, nr_pages = size >> PAGE_SHIFT;
	struct page *page;

	if (!kvm_has_mte(kvm))
		return 0;

	/*
	 * pfn_to_online_page() is used to reject ZONE_DEVICE pages
	 * that may not support tags.
	 */
	page = pfn_to_online_page(pfn);

	if (!page)
		return -EFAULT;

	for (i = 0; i < nr_pages; i++, page++) {
		if (!test_bit(PG_mte_tagged, &page->flags)) {
			mte_clear_page_tags(page_address(page));
			set_bit(PG_mte_tagged, &page->flags);
		}
	}

	return 0;
}

static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot, unsigned long hva,
			  unsigned long fault_status)
{
	int ret = 0;
	bool write_fault, writable, force_pte = false;
	bool exec_fault;
	bool device = false;
	bool shared;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	short vma_shift;
	gfn_t gfn;
	kvm_pfn_t pfn;
	bool logging_active = memslot_is_logging(memslot);
	unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
	unsigned long vma_pagesize, fault_granule;
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
	struct kvm_pgtable *pgt;

	fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
	write_fault = kvm_is_write_fault(vcpu);
	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
	VM_BUG_ON(write_fault && exec_fault);

	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/*
	 * Let's check if we will get back a huge page backed by hugetlbfs, or
	 * get block mapping for device MMIO region.
	 */
	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, hva);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		mmap_read_unlock(current->mm);
		return -EFAULT;
	}

	/*
	 * logging_active is guaranteed to never be true for VM_PFNMAP
	 * memslots.
	 */
	if (logging_active) {
		force_pte = true;
		vma_shift = PAGE_SHIFT;
	} else {
		vma_shift = get_vma_page_shift(vma, hva);
	}

	shared = (vma->vm_flags & VM_SHARED);

	switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SHIFT:
		if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
			break;
		fallthrough;
#endif
	case CONT_PMD_SHIFT:
		vma_shift = PMD_SHIFT;
		fallthrough;
	case PMD_SHIFT:
		if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
			break;
		fallthrough;
	case CONT_PTE_SHIFT:
		vma_shift = PAGE_SHIFT;
		force_pte = true;
		fallthrough;
	case PAGE_SHIFT:
		break;
	default:
		WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
	}

	vma_pagesize = 1UL << vma_shift;
	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
		fault_ipa &= ~(vma_pagesize - 1);

	gfn = fault_ipa >> PAGE_SHIFT;
	mmap_read_unlock(current->mm);

	/*
	 * Permission faults just need to update the existing leaf entry,
	 * and so normally don't require allocations from the memcache. The
	 * only exception to this is when dirty logging is enabled at runtime
	 * and a write fault needs to collapse a block entry into a table.
	 */
	if (fault_status != FSC_PERM || (logging_active && write_fault)) {
		ret = kvm_mmu_topup_memory_cache(memcache,
						 kvm_mmu_cache_min_pages(kvm));
		if (ret)
			return ret;
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to gets unmapped before we have a
	 * chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_gfn will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 *
	 * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is
	 * used to avoid unnecessary overhead introduced to locate the memory
	 * slot because it's always fixed even if @gfn is adjusted for huge
	 * pages.
	 */
	smp_rmb();

	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
				   write_fault, &writable, NULL);
	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(hva, vma_shift);
		return 0;
	}
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	if (kvm_is_device_pfn(pfn)) {
		/*
		 * If the page was identified as device early by looking at
		 * the VMA flags, vma_pagesize is already representing the
		 * largest quantity we can map.  If instead it was mapped
		 * via gfn_to_pfn_prot(), vma_pagesize is set to PAGE_SIZE
		 * and must not be upgraded.
		 *
		 * In both cases, we don't let transparent_hugepage_adjust()
		 * change things at the last minute.
		 */
		device = true;
	} else if (logging_active && !write_fault) {
		/*
		 * Only actually map the page as writable if this was a write
		 * fault.
		 */
		writable = false;
	}

	if (exec_fault && device)
		return -ENOEXEC;

	spin_lock(&kvm->mmu_lock);
	pgt = vcpu->arch.hw_mmu->pgt;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/*
	 * If we are not forced to use page mapping, check if we are
	 * backed by a THP and thus use block mapping if possible.
	 */
	if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
		if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
			vma_pagesize = fault_granule;
		else
			vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
								   hva, &pfn,
								   &fault_ipa);
	}

	if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
		/* Check the VMM hasn't introduced a new VM_SHARED VMA */
		if (!shared)
			ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
		else
			ret = -EFAULT;
		if (ret)
			goto out_unlock;
	}

	if (writable)
		prot |= KVM_PGTABLE_PROT_W;

	if (exec_fault)
		prot |= KVM_PGTABLE_PROT_X;

	if (device)
		prot |= KVM_PGTABLE_PROT_DEVICE;
	else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
		prot |= KVM_PGTABLE_PROT_X;

	/*
	 * If we took a FSC_PERM fault, we only need to relax permissions
	 * when vma_pagesize equals fault_granule. Otherwise,
	 * kvm_pgtable_stage2_map() should be called to change block size.
	 */
	if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
	} else {
		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
					     __pfn_to_phys(pfn), prot,
					     memcache);
	}

	/* Mark the page dirty only if the fault is handled successfully */
	if (writable && !ret) {
		kvm_set_pfn_dirty(pfn);
		mark_page_dirty_in_slot(kvm, memslot, gfn);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_set_pfn_accessed(pfn);
	kvm_release_pfn_clean(pfn);
	return ret != -EAGAIN ? ret : 0;
}

/* Resolve the access fault by making the page young again. */
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	pte_t pte;
	kvm_pte_t kpte;
	struct kvm_s2_mmu *mmu;

	trace_kvm_access_fault(fault_ipa);

	spin_lock(&vcpu->kvm->mmu_lock);
	mmu = vcpu->arch.hw_mmu;
	kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
	spin_unlock(&vcpu->kvm->mmu_lock);

	pte = __pte(kpte);
	if (pte_valid(pte))
		kvm_set_pfn_accessed(pte_pfn(pte));
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either the
 * guest simply needs more memory and we must allocate an appropriate page or it
 * can mean that the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	unsigned long hva;
	bool is_iabt, write_fault, writable;
	gfn_t gfn;
	int ret, idx;

	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);

	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);

	/* Synchronous External Abort? */
	if (kvm_vcpu_abt_issea(vcpu)) {
		/*
		 * For RAS the host kernel may handle this abort.
		 * There is no need to pass the error into the guest.
		 */
		if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
			kvm_inject_vabt(vcpu);

		return 1;
	}

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
	    fault_status != FSC_ACCESS) {
		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu),
			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
			(unsigned long)kvm_vcpu_get_esr(vcpu));
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
	write_fault = kvm_is_write_fault(vcpu);
	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
		/*
		 * The guest has put either its instructions or its page-tables
		 * somewhere it shouldn't have. Userspace won't be able to do
		 * anything about this (there's no syndrome for a start), so
		 * re-inject the abort back into the guest.
		 */
		if (is_iabt) {
			ret = -ENOEXEC;
			goto out;
		}

		if (kvm_vcpu_abt_iss1tw(vcpu)) {
			kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * Check for a cache maintenance operation. Since we
		 * ended-up here, we know it is outside of any memory
		 * slot. But we can't find out if that is for a device,
		 * or if the guest is just being stupid. The only thing
		 * we know for sure is that this range cannot be cached.
		 *
		 * So let's assume that the guest is just being
		 * cautious, and skip the instruction.
		 */
		if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
			kvm_incr_pc(vcpu);
			ret = 1;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, fault_ipa);
		goto out_unlock;
	}

	/* Userspace should not be able to register out-of-bounds IPAs */
	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));

	if (fault_status == FSC_ACCESS) {
		handle_access_fault(vcpu, fault_ipa);
		ret = 1;
		goto out_unlock;
	}

	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
	if (ret == 0)
		ret = 1;
out:
	if (ret == -ENOEXEC) {
		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		ret = 1;
	}
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.mmu.pgt)
		return false;

	__unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
			     (range->end - range->start) << PAGE_SHIFT,
			     range->may_block);

	return false;
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	kvm_pfn_t pfn = pte_pfn(range->pte);
	int ret;

	if (!kvm->arch.mmu.pgt)
		return false;

	WARN_ON(range->end - range->start != 1);

	ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
	if (ret)
		return false;

	/*
	 * We've moved a page around, probably through CoW, so let's treat
	 * it just like a translation fault and the map handler will clean
	 * the cache to the PoC.
	 *
	 * The MMU notifiers will have unmapped a huge PMD before calling
	 * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
	 * therefore we never need to clear out a huge PMD through this
	 * calling path and a memcache is not required.
	 */
	kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
			       PAGE_SIZE, __pfn_to_phys(pfn),
			       KVM_PGTABLE_PROT_R, NULL);

	return false;
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	u64 size = (range->end - range->start) << PAGE_SHIFT;
	kvm_pte_t kpte;
	pte_t pte;

	if (!kvm->arch.mmu.pgt)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);

	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
					range->start << PAGE_SHIFT);
	pte = __pte(kpte);
	return pte_valid(pte) && pte_young(pte);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.mmu.pgt)
		return false;

	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
					   range->start << PAGE_SHIFT);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
	return __pa(hyp_pgtable->pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

static int kvm_map_idmap_text(void)
{
	unsigned long size = hyp_idmap_end - hyp_idmap_start;
	int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start,
					PAGE_HYP_EXEC);
	if (err)
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);

	return err;
}

static void *kvm_hyp_zalloc_page(void *arg)
{
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
	.zalloc_page		= kvm_hyp_zalloc_page,
	.get_page		= kvm_host_get_page,
	.put_page		= kvm_host_put_page,
	.phys_to_virt		= kvm_host_va,
	.virt_to_phys		= kvm_host_pa,
};

int kvm_mmu_init(u32 *hyp_va_bits)
{
	int err;

	hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
	hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
	hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
	hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
	hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);

	/*
	 * We rely on the linker script to ensure at build time that the HYP
	 * init code does not cross a page boundary.
	 */
	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);

	*hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
	kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
	kvm_debug("HYP VA range: %lx:%lx\n",
		  kern_hyp_va(PAGE_OFFSET),
		  kern_hyp_va((unsigned long)high_memory - 1));

	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
	    hyp_idmap_start <  kern_hyp_va((unsigned long)high_memory - 1) &&
	    hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
		/*
		 * The idmap page is intersecting with the VA space,
		 * it is not safe to continue further.
		 */
		kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
		err = -EINVAL;
		goto out;
	}

	hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL);
	if (!hyp_pgtable) {
		kvm_err("Hyp mode page-table not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
	if (err)
		goto out_free_pgtable;

	err = kvm_map_idmap_text();
	if (err)
		goto out_destroy_pgtable;

	io_map_base = hyp_idmap_start;
	return 0;

out_destroy_pgtable:
	kvm_pgtable_hyp_destroy(hyp_pgtable);
out_free_pgtable:
	kfree(hyp_pgtable);
	hyp_pgtable = NULL;
out:
	return err;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/*
	 * At this point memslot has been committed and there is an
	 * allocated dirty_bitmap[], dirty pages will be tracked while the
	 * memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
		/*
		 * If we're with initial-all-set, we don't need to write
		 * protect any pages because they're all reported as dirty.
		 * Huge pages and normal pages will be write protect gradually.
		 */
		if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
			kvm_mmu_wp_memory_region(kvm, new->id);
		}
	}
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	hva_t hva, reg_end;
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
	    change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the IPA
	 * space addressable by the KVM guest.
	 */
	if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
		return -EFAULT;

	hva = new->userspace_addr;
	reg_end = hva + (new->npages << PAGE_SHIFT);

	mmap_read_lock(current->mm);
	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma;

		vma = find_vma_intersection(current->mm, hva, reg_end);
		if (!vma)
			break;

		/*
		 * VM_SHARED mappings are not allowed with MTE to avoid races
		 * when updating the PG_mte_tagged page flag, see
		 * sanitise_mte_tags for more details.
		 */
		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
			ret = -EINVAL;
			break;
		}

		if (vma->vm_flags & VM_PFNMAP) {
			/* IO region dirty page logging not allowed */
			if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				break;
			}
		}
		hva = min(reg_end, vma->vm_end);
	} while (hva < reg_end);

	mmap_read_unlock(current->mm);
	return ret;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_free_stage2_pgd(&kvm->arch.mmu);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
	spin_unlock(&kvm->mmu_lock);
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 *
 * Main problems:
 * - S/W ops are local to a CPU (not broadcast)
 * - We have line migration behind our back (speculation)
 * - System caches don't support S/W at all (damn!)
 *
 * In the face of the above, the best we can do is to try and convert
 * S/W ops to VA ops. Because the guest is not allowed to infer the
 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
 * which is a rather good thing for us.
 *
 * Also, it is only used when turning caches on/off ("The expected
 * usage of the cache maintenance instructions that operate by set/way
 * is associated with the cache maintenance instructions associated
 * with the powerdown and powerup of caches, if this is required by
 * the implementation.").
 *
 * We use the following policy:
 *
 * - If we trap a S/W operation, we enable VM trapping to detect
 *   caches being turned on/off, and do a full clean.
 *
 * - We flush the caches on both caches being turned on and off.
 *
 * - Once the caches are enabled, we stop trapping VM ops.
 */
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
	unsigned long hcr = *vcpu_hcr(vcpu);

	/*
	 * If this is the first time we do a S/W operation
	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
	 * VM trapping.
	 *
	 * Otherwise, rely on the VM trapping to wait for the MMU +
	 * Caches to be turned off. At that point, we'll be able to
	 * clean the caches again.
	 */
	if (!(hcr & HCR_TVM)) {
		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
					vcpu_has_cache_enabled(vcpu));
		stage2_flush_vm(vcpu->kvm);
		*vcpu_hcr(vcpu) = hcr | HCR_TVM;
	}
}

void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/*
	 * If switching the MMU+caches on, need to invalidate the caches.
	 * If switching it off, need to clean the caches.
	 * Clean + invalidate does the trick always.
	 */
	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	/* Caches are now on, stop trapping VM ops (until a S/W op) */
	if (now_enabled)
		*vcpu_hcr(vcpu) &= ~HCR_TVM;

	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}