Lines Matching +full:- +full:kvm

1 // SPDX-License-Identifier: GPL-2.0
47 if (level == (gstage_pgd_levels - 1)) in gstage_pte_index()
48 mask = (PTRS_PER_PTE * (1UL << gstage_pgd_xbits)) - 1; in gstage_pte_index()
50 mask = PTRS_PER_PTE - 1; in gstage_pte_index()
72 return -EINVAL; in gstage_page_size_to_level()
78 return -EINVAL; in gstage_level_to_page_order()
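
The two helpers above encode a simple relationship between a G-stage page-table level and the size of a leaf mapping installed at that level. A minimal standalone sketch of that relationship, assuming 4 KiB base pages and 9 index bits per level (so level 0 = 4 KiB, level 1 = 2 MiB, level 2 = 1 GiB); the ex_* names and constants are illustrative, not the kernel's:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the kernel derives these from the detected mode. */
#define EX_PAGE_SHIFT		12U	/* 4 KiB base pages */
#define EX_PTE_INDEX_BITS	9U	/* 512 entries per table level */
#define EX_PGD_LEVELS		3U	/* e.g. a 3-level Sv39x4-style table */

/* Mapping size (as log2) for a leaf PTE installed at a given level. */
static int ex_level_to_page_order(uint32_t level, uint32_t *out_order)
{
	if (level >= EX_PGD_LEVELS)
		return -EINVAL;
	*out_order = EX_PAGE_SHIFT + level * EX_PTE_INDEX_BITS;
	return 0;
}

/* Inverse: which level can hold a leaf of the requested size? */
static int ex_page_size_to_level(uint64_t page_size, uint32_t *out_level)
{
	for (uint32_t lvl = 0; lvl < EX_PGD_LEVELS; lvl++) {
		if (page_size == (1ULL << (EX_PAGE_SHIFT + lvl * EX_PTE_INDEX_BITS))) {
			*out_level = lvl;
			return 0;
		}
	}
	return -EINVAL;
}

int main(void)
{
	uint32_t order, level;

	if (!ex_level_to_page_order(1, &order))
		printf("level 1 maps %llu bytes\n", 1ULL << order);	/* 2 MiB */
	if (!ex_page_size_to_level(1ULL << 30, &level))
		printf("a 1 GiB leaf lives at level %u\n", level);
	return 0;
}
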
97 static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr, in gstage_get_leaf_entry() argument
101 u32 current_level = gstage_pgd_levels - 1; in gstage_get_leaf_entry()
104 ptep = (pte_t *)kvm->arch.pgd; in gstage_get_leaf_entry()
114 current_level--; in gstage_get_leaf_entry()
126 static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr) in gstage_remote_tlb_flush() argument
132 addr &= ~(BIT(order) - 1); in gstage_remote_tlb_flush()
134 kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order); in gstage_remote_tlb_flush()
137 static int gstage_set_pte(struct kvm *kvm, u32 level, in gstage_set_pte() argument
141 u32 current_level = gstage_pgd_levels - 1; in gstage_set_pte()
142 pte_t *next_ptep = (pte_t *)kvm->arch.pgd; in gstage_set_pte()
146 return -EINVAL; in gstage_set_pte()
150 return -EEXIST; in gstage_set_pte()
154 return -ENOMEM; in gstage_set_pte()
157 return -ENOMEM; in gstage_set_pte()
162 return -EEXIST; in gstage_set_pte()
166 current_level--; in gstage_set_pte()
172 gstage_remote_tlb_flush(kvm, current_level, addr); in gstage_set_pte()
177 static int gstage_map_page(struct kvm *kvm, in gstage_map_page() argument
193 * A RISC-V implementation can choose to either: in gstage_map_page()
199 * always set 'A' and 'D' PTE bits at time of creating G-stage in gstage_map_page()
200 * mapping. To support KVM dirty page logging with both options in gstage_map_page()
201 * mentioned above, we will write-protect G-stage PTEs to track dirty pages. in gstage_map_page()
219 return gstage_set_pte(kvm, level, pcache, gpa, &new_pte); in gstage_map_page()
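
The comment above captures the dirty-logging design: leaf PTEs are created with the 'A' and 'D' bits already set, so neither style of implementation ever faults just to update them, and dirty tracking is instead done by withholding the write permission. A standalone sketch of such a leaf PTE, using the PTE bit positions from the RISC-V privileged spec; the ex_* names are illustrative, and the real code builds PTEs with the kernel's pte helpers rather than raw masks:

#include <stdbool.h>
#include <stdint.h>

/* RISC-V PTE bits per the privileged spec; illustrative defines. */
#define EX_PTE_V	(1ULL << 0)	/* valid */
#define EX_PTE_R	(1ULL << 1)	/* readable */
#define EX_PTE_W	(1ULL << 2)	/* writable */
#define EX_PTE_X	(1ULL << 3)	/* executable */
#define EX_PTE_U	(1ULL << 4)	/* user bit; must be set in G-stage PTEs */
#define EX_PTE_A	(1ULL << 6)	/* accessed */
#define EX_PTE_D	(1ULL << 7)	/* dirty */
#define EX_PTE_PPN_SHIFT 10		/* physical page number starts at bit 10 */

/*
 * Build a leaf G-stage PTE with 'A' and 'D' pre-set, as the comment above
 * describes. When dirty logging is enabled, the W permission is withheld
 * so the first guest write faults and the page can be marked dirty.
 */
uint64_t ex_make_leaf_pte(uint64_t host_pfn, bool writable, bool logging)
{
	uint64_t pte = (host_pfn << EX_PTE_PPN_SHIFT) |
		       EX_PTE_V | EX_PTE_U | EX_PTE_R | EX_PTE_X |
		       EX_PTE_A | EX_PTE_D;

	if (writable && !logging)
		pte |= EX_PTE_W;
	return pte;
}
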
225 GSTAGE_OP_WP, /* Write-protect */
228 static void gstage_op_pte(struct kvm *kvm, gpa_t addr, in gstage_op_pte() argument
240 BUG_ON(addr & (page_size - 1)); in gstage_op_pte()
247 next_ptep_level = ptep_level - 1; in gstage_op_pte()
256 gstage_op_pte(kvm, addr + i * next_page_size, in gstage_op_pte()
265 gstage_remote_tlb_flush(kvm, ptep_level, addr); in gstage_op_pte()
269 static void gstage_unmap_range(struct kvm *kvm, gpa_t start, in gstage_unmap_range() argument
280 found_leaf = gstage_get_leaf_entry(kvm, addr, in gstage_unmap_range()
289 if (!(addr & (page_size - 1)) && ((end - addr) >= page_size)) in gstage_unmap_range()
290 gstage_op_pte(kvm, addr, ptep, in gstage_unmap_range()
297 * If the range is too large, release the kvm->mmu_lock in gstage_unmap_range()
301 cond_resched_lock(&kvm->mmu_lock); in gstage_unmap_range()
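
The cond_resched_lock() call above implements the comment's promise: when a large range is being unmapped, kvm->mmu_lock is briefly dropped so waiting vCPUs and the scheduler can make progress. A kernel-style sketch of that loop shape, where ex_unmap_one() is a hypothetical helper returning the number of bytes it handled:

#include <linux/kvm_host.h>
#include <linux/sched.h>

/* Sketch only: walk [start, end) and yield kvm->mmu_lock when permitted. */
static void ex_unmap_range(struct kvm *kvm, gpa_t start, gpa_t end,
			   bool may_block)
{
	gpa_t addr = start;

	while (addr < end) {
		addr += ex_unmap_one(kvm, addr);	/* hypothetical helper */

		/*
		 * Briefly drop and re-take the lock if someone is waiting
		 * for it or a reschedule is due, but only when the caller
		 * is allowed to block (e.g. not an atomic MMU notifier).
		 */
		if (may_block && addr < end)
			cond_resched_lock(&kvm->mmu_lock);
	}
}
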
305 static void gstage_wp_range(struct kvm *kvm, gpa_t start, gpa_t end) in gstage_wp_range() argument
315 found_leaf = gstage_get_leaf_entry(kvm, addr, in gstage_wp_range()
324 if (!(addr & (page_size - 1)) && ((end - addr) >= page_size)) in gstage_wp_range()
325 gstage_op_pte(kvm, addr, ptep, in gstage_wp_range()
333 static void gstage_wp_memory_region(struct kvm *kvm, int slot) in gstage_wp_memory_region() argument
335 struct kvm_memslots *slots = kvm_memslots(kvm); in gstage_wp_memory_region()
337 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT; in gstage_wp_memory_region()
338 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in gstage_wp_memory_region()
340 spin_lock(&kvm->mmu_lock); in gstage_wp_memory_region()
341 gstage_wp_range(kvm, start, end); in gstage_wp_memory_region()
342 spin_unlock(&kvm->mmu_lock); in gstage_wp_memory_region()
343 kvm_flush_remote_tlbs(kvm); in gstage_wp_memory_region()
346 int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa, in kvm_riscv_gstage_ioremap() argument
359 end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK; in kvm_riscv_gstage_ioremap()
372 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_ioremap()
373 ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte); in kvm_riscv_gstage_ioremap()
374 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_ioremap()
386 void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size) in kvm_riscv_gstage_iounmap() argument
388 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_iounmap()
389 gstage_unmap_range(kvm, gpa, size, false); in kvm_riscv_gstage_iounmap()
390 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_iounmap()
393 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, in kvm_arch_mmu_enable_log_dirty_pt_masked() argument
398 phys_addr_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked()
402 gstage_wp_range(kvm, start, end); in kvm_arch_mmu_enable_log_dirty_pt_masked()
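
For context, this hook receives one 64-page word of the dirty bitmap and must re-write-protect exactly the pages whose bits are set; the surrounding lines convert that mask into a single guest-physical range for gstage_wp_range(). A standalone sketch of the mask-to-range step, assuming 4 KiB pages and using compiler builtins in place of the kernel's bit helpers; the ex_* names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT 12

/*
 * Given the first gfn covered by the bitmap word and a mask of pages to
 * re-protect (assumed non-zero), compute the [start, end) guest-physical
 * range spanning the lowest and highest set bits.
 */
static void ex_mask_to_gpa_range(uint64_t base_gfn, uint64_t mask,
				 uint64_t *start, uint64_t *end)
{
	unsigned int first = __builtin_ctzll(mask);	/* lowest set bit */
	unsigned int last = 63 - __builtin_clzll(mask);	/* highest set bit */

	*start = (base_gfn + first) << EX_PAGE_SHIFT;
	*end = (base_gfn + last + 1) << EX_PAGE_SHIFT;
}

int main(void)
{
	uint64_t start, end;

	/* Pages 3..5 of the word were dirtied: mask = 0b111000. */
	ex_mask_to_gpa_range(0x1000, 0x38, &start, &end);
	printf("write-protect GPAs [0x%llx, 0x%llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}
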
405 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
409 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free) in kvm_arch_free_memslot() argument
413 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) in kvm_arch_memslots_updated() argument
417 void kvm_arch_flush_shadow_all(struct kvm *kvm) in kvm_arch_flush_shadow_all() argument
419 kvm_riscv_gstage_free_pgd(kvm); in kvm_arch_flush_shadow_all()
422 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, in kvm_arch_flush_shadow_memslot() argument
425 gpa_t gpa = slot->base_gfn << PAGE_SHIFT; in kvm_arch_flush_shadow_memslot()
426 phys_addr_t size = slot->npages << PAGE_SHIFT; in kvm_arch_flush_shadow_memslot()
428 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
429 gstage_unmap_range(kvm, gpa, size, false); in kvm_arch_flush_shadow_memslot()
430 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
433 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
443 if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) in kvm_arch_commit_memory_region()
444 gstage_wp_memory_region(kvm, new->id); in kvm_arch_commit_memory_region()
447 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
463 * space addressable by the KVM guest GPA space. in kvm_arch_prepare_memory_region()
465 if ((new->base_gfn + new->npages) >= in kvm_arch_prepare_memory_region()
467 return -EFAULT; in kvm_arch_prepare_memory_region()
469 hva = new->userspace_addr; in kvm_arch_prepare_memory_region()
470 size = new->npages << PAGE_SHIFT; in kvm_arch_prepare_memory_region()
472 base_gpa = new->base_gfn << PAGE_SHIFT; in kvm_arch_prepare_memory_region()
473 writable = !(new->flags & KVM_MEM_READONLY); in kvm_arch_prepare_memory_region()
475 mmap_read_lock(current->mm); in kvm_arch_prepare_memory_region()
482 * +--------------------------------------------+ in kvm_arch_prepare_memory_region()
483 * +---------------+----------------+ +----------------+ in kvm_arch_prepare_memory_region()
485 * +---------------+----------------+ +----------------+ in kvm_arch_prepare_memory_region()
487 * +--------------------------------------------+ in kvm_arch_prepare_memory_region()
490 struct vm_area_struct *vma = find_vma(current->mm, hva); in kvm_arch_prepare_memory_region()
493 if (!vma || vma->vm_start >= reg_end) in kvm_arch_prepare_memory_region()
497 * Mapping a read-only VMA is only allowed if the in kvm_arch_prepare_memory_region()
498 * memory region is configured as read-only. in kvm_arch_prepare_memory_region()
500 if (writable && !(vma->vm_flags & VM_WRITE)) { in kvm_arch_prepare_memory_region()
501 ret = -EPERM; in kvm_arch_prepare_memory_region()
506 vm_start = max(hva, vma->vm_start); in kvm_arch_prepare_memory_region()
507 vm_end = min(reg_end, vma->vm_end); in kvm_arch_prepare_memory_region()
509 if (vma->vm_flags & VM_PFNMAP) { in kvm_arch_prepare_memory_region()
510 gpa_t gpa = base_gpa + (vm_start - hva); in kvm_arch_prepare_memory_region()
513 pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; in kvm_arch_prepare_memory_region()
514 pa += vm_start - vma->vm_start; in kvm_arch_prepare_memory_region()
517 if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_arch_prepare_memory_region()
518 ret = -EINVAL; in kvm_arch_prepare_memory_region()
522 ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa, in kvm_arch_prepare_memory_region()
523 vm_end - vm_start, in kvm_arch_prepare_memory_region()
535 kvm_riscv_gstage_iounmap(kvm, base_gpa, size); in kvm_arch_prepare_memory_region()
538 mmap_read_unlock(current->mm); in kvm_arch_prepare_memory_region()
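
The ASCII diagram earlier in this function shows why a loop is needed here: the HVA range backing a memory region may be covered by several VMAs, and each overlapping piece has to be clamped to the region and handled separately. A kernel-style sketch of that walk, where ex_handle_piece() is a hypothetical per-piece handler (for instance, the PFNMAP ioremap case above):

#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch only: visit every VMA piece overlapping [hva, reg_end). */
static int ex_walk_region_vmas(unsigned long hva, unsigned long reg_end)
{
	int ret = 0;

	mmap_read_lock(current->mm);
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		unsigned long vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/* Clamp the VMA to the part that overlaps the region. */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		ret = ex_handle_piece(vma, vm_start, vm_end);	/* hypothetical */
		if (ret)
			break;

		hva = vm_end;	/* continue with the next VMA, if any */
	} while (hva < reg_end);
	mmap_read_unlock(current->mm);

	return ret;
}
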
542 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_unmap_gfn_range() argument
544 if (!kvm->arch.pgd) in kvm_unmap_gfn_range()
547 gstage_unmap_range(kvm, range->start << PAGE_SHIFT, in kvm_unmap_gfn_range()
548 (range->end - range->start) << PAGE_SHIFT, in kvm_unmap_gfn_range()
549 range->may_block); in kvm_unmap_gfn_range()
553 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_set_spte_gfn() argument
556 kvm_pfn_t pfn = pte_pfn(range->arg.pte); in kvm_set_spte_gfn()
558 if (!kvm->arch.pgd) in kvm_set_spte_gfn()
561 WARN_ON(range->end - range->start != 1); in kvm_set_spte_gfn()
563 ret = gstage_map_page(kvm, NULL, range->start << PAGE_SHIFT, in kvm_set_spte_gfn()
566 kvm_debug("Failed to map G-stage page (error %d)\n", ret); in kvm_set_spte_gfn()
573 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_age_gfn() argument
577 u64 size = (range->end - range->start) << PAGE_SHIFT; in kvm_age_gfn()
579 if (!kvm->arch.pgd) in kvm_age_gfn()
584 if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT, in kvm_age_gfn()
591 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_test_age_gfn() argument
595 u64 size = (range->end - range->start) << PAGE_SHIFT; in kvm_test_age_gfn()
597 if (!kvm->arch.pgd) in kvm_test_age_gfn()
602 if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT, in kvm_test_age_gfn()
619 struct kvm *kvm = vcpu->kvm; in kvm_riscv_gstage_map() local
620 struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache; in kvm_riscv_gstage_map()
621 bool logging = (memslot->dirty_bitmap && in kvm_riscv_gstage_map()
622 !(memslot->flags & KVM_MEM_READONLY)) ? true : false; in kvm_riscv_gstage_map()
628 kvm_err("Failed to topup G-stage cache\n"); in kvm_riscv_gstage_map()
632 mmap_read_lock(current->mm); in kvm_riscv_gstage_map()
634 vma = vma_lookup(current->mm, hva); in kvm_riscv_gstage_map()
637 mmap_read_unlock(current->mm); in kvm_riscv_gstage_map()
638 return -EFAULT; in kvm_riscv_gstage_map()
646 if (logging || (vma->vm_flags & VM_PFNMAP)) in kvm_riscv_gstage_map()
653 * Read mmu_invalidate_seq so that KVM can detect if the results of in kvm_riscv_gstage_map()
655 * kvm->mmu_lock. in kvm_riscv_gstage_map()
660 mmu_seq = kvm->mmu_invalidate_seq; in kvm_riscv_gstage_map()
661 mmap_read_unlock(current->mm); in kvm_riscv_gstage_map()
667 return -EFAULT; in kvm_riscv_gstage_map()
670 hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable); in kvm_riscv_gstage_map()
677 return -EFAULT; in kvm_riscv_gstage_map()
686 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_map()
688 if (mmu_invalidate_retry(kvm, mmu_seq)) in kvm_riscv_gstage_map()
693 mark_page_dirty(kvm, gfn); in kvm_riscv_gstage_map()
694 ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT, in kvm_riscv_gstage_map()
697 ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT, in kvm_riscv_gstage_map()
702 kvm_err("Failed to map in G-stage\n"); in kvm_riscv_gstage_map()
705 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_map()
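
The mmu_invalidate_seq snapshot above is taken before the fault path drops mmap_lock and resolves the host pfn; once kvm->mmu_lock is held, mmu_invalidate_retry() reports whether an MMU-notifier invalidation ran in that window, in which case the now-stale pfn must not be installed and the fault is simply retried. A kernel-style sketch of that pairing, where ex_resolve_pfn() and ex_install_mapping() are hypothetical stand-ins:

#include <linux/kvm_host.h>

/* Sketch only: detect an MMU-notifier invalidation racing with the fault. */
static int ex_handle_guest_fault(struct kvm *kvm, gpa_t gpa, unsigned long hva)
{
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	int ret;

	/* In the real fault path this read happens before mmap_read_unlock(). */
	mmu_seq = kvm->mmu_invalidate_seq;

	pfn = ex_resolve_pfn(hva);		/* may sleep, may race */

	spin_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/*
		 * An invalidation ran after the pfn lookup; do not install
		 * the mapping. (A full implementation also releases the pfn
		 * reference here.) The guest will simply fault again.
		 */
		ret = -EAGAIN;
		goto out_unlock;
	}

	ret = ex_install_mapping(kvm, gpa, pfn);	/* hypothetical */
out_unlock:
	spin_unlock(&kvm->mmu_lock);
	return ret;
}
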
711 int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm) in kvm_riscv_gstage_alloc_pgd() argument
715 if (kvm->arch.pgd != NULL) { in kvm_riscv_gstage_alloc_pgd()
717 return -EINVAL; in kvm_riscv_gstage_alloc_pgd()
723 return -ENOMEM; in kvm_riscv_gstage_alloc_pgd()
724 kvm->arch.pgd = page_to_virt(pgd_page); in kvm_riscv_gstage_alloc_pgd()
725 kvm->arch.pgd_phys = page_to_phys(pgd_page); in kvm_riscv_gstage_alloc_pgd()
730 void kvm_riscv_gstage_free_pgd(struct kvm *kvm) in kvm_riscv_gstage_free_pgd() argument
734 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_free_pgd()
735 if (kvm->arch.pgd) { in kvm_riscv_gstage_free_pgd()
736 gstage_unmap_range(kvm, 0UL, gstage_gpa_size, false); in kvm_riscv_gstage_free_pgd()
737 pgd = READ_ONCE(kvm->arch.pgd); in kvm_riscv_gstage_free_pgd()
738 kvm->arch.pgd = NULL; in kvm_riscv_gstage_free_pgd()
739 kvm->arch.pgd_phys = 0; in kvm_riscv_gstage_free_pgd()
741 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_free_pgd()
750 struct kvm_arch *k = &vcpu->kvm->arch; in kvm_riscv_gstage_update_hgatp()
752 hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) & HGATP_VMID; in kvm_riscv_gstage_update_hgatp()
753 hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN; in kvm_riscv_gstage_update_hgatp()
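
kvm_riscv_gstage_update_hgatp() packs the detected translation mode, this guest's VMID and the PPN of the G-stage root table into the hgatp CSR. A standalone sketch of that packing, using the RV64 hgatp layout from the privileged spec (MODE in bits 63:60, VMID in 57:44, PPN in 43:0); the ex_* names are illustrative:

#include <stdint.h>

/* RV64 hgatp field layout per the privileged spec; illustrative defines. */
#define EX_HGATP_MODE_SHIFT	60
#define EX_HGATP_VMID_SHIFT	44
#define EX_HGATP_VMID_MASK	0x3fffULL		/* up to 14-bit VMID */
#define EX_HGATP_PPN_MASK	0xfffffffffffULL	/* bits 43:0 */
#define EX_PAGE_SHIFT		12

/*
 * Compose hgatp from the detected mode, this guest's VMID, and the physical
 * address of the (16 KiB-sized, 16 KiB-aligned) G-stage root page table.
 */
uint64_t ex_make_hgatp(uint64_t mode, uint64_t vmid, uint64_t pgd_phys)
{
	return (mode << EX_HGATP_MODE_SHIFT) |
	       ((vmid & EX_HGATP_VMID_MASK) << EX_HGATP_VMID_SHIFT) |
	       ((pgd_phys >> EX_PAGE_SHIFT) & EX_HGATP_PPN_MASK);
}
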
764 /* Try Sv57x4 G-stage mode */ in kvm_riscv_gstage_mode_detect()
772 /* Try Sv48x4 G-stage mode */ in kvm_riscv_gstage_mode_detect()
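
The mode probe relies on a property of the privileged spec: writing an unsupported MODE value to hgatp has no effect, so the written value can be confirmed by reading the CSR back, and the code falls back from Sv57x4 to Sv48x4 (and ultimately Sv39x4) until a write sticks. A kernel-style sketch of that probe, using csr_read()/csr_write() and the HGATP_* constants from asm/csr.h; the real code also records the matching page-table depth, flushes the G-stage TLB and clears hgatp when done:

#include <asm/csr.h>

/* Sketch only: pick the largest G-stage mode the hardware accepts. */
static unsigned long ex_probe_gstage_mode(void)
{
	/* Fall back to Sv39x4 (3-level table) if neither larger mode sticks. */
	unsigned long mode = HGATP_MODE_SV39X4;

	csr_write(CSR_HGATP, HGATP_MODE_SV57X4 << HGATP_MODE_SHIFT);
	if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV57X4) {
		mode = HGATP_MODE_SV57X4;		/* 5-level table */
	} else {
		csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
		if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4)
			mode = HGATP_MODE_SV48X4;	/* 4-level table */
	}

	csr_write(CSR_HGATP, 0);	/* leave hgatp Bare while no VM runs */
	return mode;
}
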