Lines matching references to kvm (RISC-V KVM g-stage MMU code)
97 static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr, in gstage_get_leaf_entry() argument
104 ptep = (pte_t *)kvm->arch.pgd; in gstage_get_leaf_entry()
126 static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr) in gstage_remote_tlb_flush() argument
134 kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order); in gstage_remote_tlb_flush()
137 static int gstage_set_pte(struct kvm *kvm, u32 level, in gstage_set_pte() argument
142 pte_t *next_ptep = (pte_t *)kvm->arch.pgd; in gstage_set_pte()
172 gstage_remote_tlb_flush(kvm, current_level, addr); in gstage_set_pte()
177 static int gstage_map_page(struct kvm *kvm, in gstage_map_page() argument
219 return gstage_set_pte(kvm, level, pcache, gpa, &new_pte); in gstage_map_page()
228 static void gstage_op_pte(struct kvm *kvm, gpa_t addr, in gstage_op_pte() argument
256 gstage_op_pte(kvm, addr + i * next_page_size, in gstage_op_pte()
265 gstage_remote_tlb_flush(kvm, ptep_level, addr); in gstage_op_pte()
269 static void gstage_unmap_range(struct kvm *kvm, gpa_t start, in gstage_unmap_range() argument
280 found_leaf = gstage_get_leaf_entry(kvm, addr, in gstage_unmap_range()
290 gstage_op_pte(kvm, addr, ptep, in gstage_unmap_range()
301 cond_resched_lock(&kvm->mmu_lock); in gstage_unmap_range()
305 static void gstage_wp_range(struct kvm *kvm, gpa_t start, gpa_t end) in gstage_wp_range() argument
315 found_leaf = gstage_get_leaf_entry(kvm, addr, in gstage_wp_range()
325 gstage_op_pte(kvm, addr, ptep, in gstage_wp_range()
333 static void gstage_wp_memory_region(struct kvm *kvm, int slot) in gstage_wp_memory_region() argument
335 struct kvm_memslots *slots = kvm_memslots(kvm); in gstage_wp_memory_region()
340 spin_lock(&kvm->mmu_lock); in gstage_wp_memory_region()
341 gstage_wp_range(kvm, start, end); in gstage_wp_memory_region()
342 spin_unlock(&kvm->mmu_lock); in gstage_wp_memory_region()
343 kvm_flush_remote_tlbs(kvm); in gstage_wp_memory_region()
346 int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa, in kvm_riscv_gstage_ioremap() argument
372 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_ioremap()
373 ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte); in kvm_riscv_gstage_ioremap()
374 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_ioremap()
386 void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size) in kvm_riscv_gstage_iounmap() argument
388 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_iounmap()
389 gstage_unmap_range(kvm, gpa, size, false); in kvm_riscv_gstage_iounmap()
390 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_iounmap()
393 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, in kvm_arch_mmu_enable_log_dirty_pt_masked() argument
402 gstage_wp_range(kvm, start, end); in kvm_arch_mmu_enable_log_dirty_pt_masked()
405 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
409 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free) in kvm_arch_free_memslot() argument
413 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) in kvm_arch_memslots_updated() argument
417 void kvm_arch_flush_shadow_all(struct kvm *kvm) in kvm_arch_flush_shadow_all() argument
419 kvm_riscv_gstage_free_pgd(kvm); in kvm_arch_flush_shadow_all()
422 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, in kvm_arch_flush_shadow_memslot() argument
428 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
429 gstage_unmap_range(kvm, gpa, size, false); in kvm_arch_flush_shadow_memslot()
430 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
433 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
444 gstage_wp_memory_region(kvm, new->id); in kvm_arch_commit_memory_region()
447 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
522 ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa, in kvm_arch_prepare_memory_region()
535 kvm_riscv_gstage_iounmap(kvm, base_gpa, size); in kvm_arch_prepare_memory_region()
542 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_unmap_gfn_range() argument
544 if (!kvm->arch.pgd) in kvm_unmap_gfn_range()
547 gstage_unmap_range(kvm, range->start << PAGE_SHIFT, in kvm_unmap_gfn_range()
553 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_set_spte_gfn() argument
558 if (!kvm->arch.pgd) in kvm_set_spte_gfn()
563 ret = gstage_map_page(kvm, NULL, range->start << PAGE_SHIFT, in kvm_set_spte_gfn()
573 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_age_gfn() argument
579 if (!kvm->arch.pgd) in kvm_age_gfn()
584 if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT, in kvm_age_gfn()
591 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_test_age_gfn() argument
597 if (!kvm->arch.pgd) in kvm_test_age_gfn()
602 if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT, in kvm_test_age_gfn()
619 struct kvm *kvm = vcpu->kvm; in kvm_riscv_gstage_map() local
660 mmu_seq = kvm->mmu_invalidate_seq; in kvm_riscv_gstage_map()
670 hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable); in kvm_riscv_gstage_map()
686 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_map()
688 if (mmu_invalidate_retry(kvm, mmu_seq)) in kvm_riscv_gstage_map()
693 mark_page_dirty(kvm, gfn); in kvm_riscv_gstage_map()
694 ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT, in kvm_riscv_gstage_map()
697 ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT, in kvm_riscv_gstage_map()
705 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_map()
711 int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm) in kvm_riscv_gstage_alloc_pgd() argument
715 if (kvm->arch.pgd != NULL) { in kvm_riscv_gstage_alloc_pgd()
724 kvm->arch.pgd = page_to_virt(pgd_page); in kvm_riscv_gstage_alloc_pgd()
725 kvm->arch.pgd_phys = page_to_phys(pgd_page); in kvm_riscv_gstage_alloc_pgd()
730 void kvm_riscv_gstage_free_pgd(struct kvm *kvm) in kvm_riscv_gstage_free_pgd() argument
734 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_free_pgd()
735 if (kvm->arch.pgd) { in kvm_riscv_gstage_free_pgd()
736 gstage_unmap_range(kvm, 0UL, gstage_gpa_size, false); in kvm_riscv_gstage_free_pgd()
737 pgd = READ_ONCE(kvm->arch.pgd); in kvm_riscv_gstage_free_pgd()
738 kvm->arch.pgd = NULL; in kvm_riscv_gstage_free_pgd()
739 kvm->arch.pgd_phys = 0; in kvm_riscv_gstage_free_pgd()
741 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_free_pgd()
750 struct kvm_arch *k = &vcpu->kvm->arch; in kvm_riscv_gstage_update_hgatp()
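A minimal sketch, reconstructed from the fragments above (the gstage_wp_memory_region() lines 333-343 in the listing), of the pattern most of these references share: g-stage page-table walks run under kvm->mmu_lock, and remote TLBs are flushed only after the lock is dropped. Only the calls visible in the listing (kvm_memslots, gstage_wp_range, spin_lock on kvm->mmu_lock, kvm_flush_remote_tlbs) are taken from the source; the wrapper name example_wp_slot() is hypothetical and id_to_memslot() is assumed from KVM's generic headers, so treat the body as an approximation rather than the file's actual code.

/*
 * Hypothetical illustration only; gstage_wp_range() is a static helper,
 * so this would have to live in the same file (the RISC-V KVM MMU code)
 * to compile.
 */
static void example_wp_slot(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	gpa_t start = memslot->base_gfn << PAGE_SHIFT;
	gpa_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	/* G-stage page-table updates are serialized by kvm->mmu_lock. */
	spin_lock(&kvm->mmu_lock);
	gstage_wp_range(kvm, start, end);	/* write-protect the range's leaf PTEs */
	spin_unlock(&kvm->mmu_lock);

	/* Stale guest translations are invalidated after the lock is released. */
	kvm_flush_remote_tlbs(kvm);
}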