Lines matching refs: kvm — cross-reference hits in the x86 TDP MMU (arch/x86/kvm/mmu/tdp_mmu.c). Each entry gives the source line number, the matching code, the containing function, and whether the reference is a function argument or a local.

15 void kvm_mmu_init_tdp_mmu(struct kvm *kvm)  in kvm_mmu_init_tdp_mmu()  argument
17 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); in kvm_mmu_init_tdp_mmu()
18 spin_lock_init(&kvm->arch.tdp_mmu_pages_lock); in kvm_mmu_init_tdp_mmu()
22 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, in kvm_lockdep_assert_mmu_lock_held() argument
26 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
28 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_lockdep_assert_mmu_lock_held()
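
The two lockdep hits above are the shared and exclusive branches of the lock-assertion helper. A minimal sketch of how those branches likely fit together, reconstructed from the listed lines (the bool return is assumed so the helper can be used inside conditional macros):

static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							      bool shared)
{
	/* A shared walk runs under mmu_lock held for read, otherwise write. */
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	return true;
}
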
33 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) in kvm_mmu_uninit_tdp_mmu() argument
40 kvm_tdp_mmu_invalidate_all_roots(kvm); in kvm_mmu_uninit_tdp_mmu()
41 kvm_tdp_mmu_zap_invalidated_roots(kvm); in kvm_mmu_uninit_tdp_mmu()
43 WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages)); in kvm_mmu_uninit_tdp_mmu()
44 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); in kvm_mmu_uninit_tdp_mmu()
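
Read together, the uninit hits show the teardown order: invalidate every root (dropping the TDP MMU's reference to each), zap the invalidated roots, then warn if any pages or roots leaked. A sketch of that sequence; the trailing rcu_barrier() is an assumption based on the upstream file, where it flushes the RCU callbacks that free shadow pages before the VM is destroyed:

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	/* Schedule every root for zapping and drop the TDP MMU's reference. */
	kvm_tdp_mmu_invalidate_all_roots(kvm);
	kvm_tdp_mmu_zap_invalidated_roots(kvm);

	/* Nothing should be left: no accounted pages, no roots on the list. */
	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/* Assumed: let in-flight RCU callbacks that free pages complete. */
	rcu_barrier();
}
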
76 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, in kvm_tdp_mmu_put_root() argument
79 kvm_lockdep_assert_mmu_lock_held(kvm, shared); in kvm_tdp_mmu_put_root()
89 KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm); in kvm_tdp_mmu_put_root()
91 spin_lock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_put_root()
93 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_put_root()
107 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, in tdp_mmu_next_root() argument
116 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, in tdp_mmu_next_root()
120 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots, in tdp_mmu_next_root()
128 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots, in tdp_mmu_next_root()
135 kvm_tdp_mmu_put_root(kvm, prev_root, shared); in tdp_mmu_next_root()
223 struct kvm *kvm = vcpu->kvm; in kvm_tdp_mmu_get_vcpu_root_hpa() local
226 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_get_vcpu_root_hpa()
232 for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) { in kvm_tdp_mmu_get_vcpu_root_hpa()
250 spin_lock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_get_vcpu_root_hpa()
251 list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots); in kvm_tdp_mmu_get_vcpu_root_hpa()
252 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_get_vcpu_root_hpa()
258 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
262 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_account_mmu_page() argument
265 atomic64_inc(&kvm->arch.tdp_mmu_pages); in tdp_account_mmu_page()
268 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_unaccount_mmu_page() argument
271 atomic64_dec(&kvm->arch.tdp_mmu_pages); in tdp_unaccount_mmu_page()
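
The accounting pair above maintains a single atomic64_t counter of shadow pages owned by the TDP MMU; a minimal sketch (the upstream helpers also charge the page to per-task pgtable stats, which is omitted here):

static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	atomic64_inc(&kvm->arch.tdp_mmu_pages);
}

static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	atomic64_dec(&kvm->arch.tdp_mmu_pages);
}
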
283 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp, in tdp_mmu_unlink_sp() argument
286 tdp_unaccount_mmu_page(kvm, sp); in tdp_mmu_unlink_sp()
292 spin_lock(&kvm->arch.tdp_mmu_pages_lock); in tdp_mmu_unlink_sp()
294 lockdep_assert_held_write(&kvm->mmu_lock); in tdp_mmu_unlink_sp()
297 untrack_possible_nx_huge_page(kvm, sp); in tdp_mmu_unlink_sp()
300 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); in tdp_mmu_unlink_sp()
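
The unlink hits show the two locking modes for removing a shadow page: under a shared (read) mmu_lock, the NX huge-page bookkeeping is protected by tdp_mmu_pages_lock, whereas an exclusive caller only needs the lockdep assertion. A sketch, assuming the early return is gated on the page's NX-huge-page state as in the upstream file:

static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool shared)
{
	tdp_unaccount_mmu_page(kvm, sp);

	/* Assumed gate: only NX-huge-page-disallowed pages need untracking. */
	if (!sp->nx_huge_page_disallowed)
		return;

	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	sp->nx_huge_page_disallowed = false;
	untrack_possible_nx_huge_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}
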
320 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared) in handle_removed_pt() argument
329 tdp_mmu_unlink_sp(kvm, sp, shared); in handle_removed_pt()
396 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn, in handle_removed_pt()
419 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, in handle_changed_spte() argument
490 kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1); in handle_changed_spte()
504 handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared); in handle_changed_spte()
528 static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm, in tdp_mmu_set_spte_atomic() argument
542 lockdep_assert_held_read(&kvm->mmu_lock); in tdp_mmu_set_spte_atomic()
554 handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, in tdp_mmu_set_spte_atomic()
560 static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm, in tdp_mmu_zap_spte_atomic() argument
571 ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE); in tdp_mmu_zap_spte_atomic()
575 kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level); in tdp_mmu_zap_spte_atomic()
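
The zap-atomic hits outline the freeze-flush-clear pattern: the SPTE is first replaced with the special REMOVED_SPTE value via a cmpxchg, the TLBs for that gfn are flushed, and only then is the entry cleared for real. A sketch, with the final raw write and old_spte bookkeeping assumed from the upstream helper:

static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					  struct tdp_iter *iter)
{
	int ret;

	/* Freeze the SPTE so concurrent walkers can't install a new one. */
	ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
	if (ret)
		return ret;

	kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);

	/*
	 * Assumed tail: with the SPTE frozen and TLBs flushed, the entry can
	 * be cleared with a plain write.
	 */
	__kvm_tdp_mmu_write_spte(iter->sptep, 0);
	iter->old_spte = 0;

	return 0;
}
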
603 static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, in tdp_mmu_set_spte() argument
606 lockdep_assert_held_write(&kvm->mmu_lock); in tdp_mmu_set_spte()
619 handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false); in tdp_mmu_set_spte()
623 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_iter_set_spte() argument
627 iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, in tdp_mmu_iter_set_spte()
659 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm, in tdp_mmu_iter_cond_resched() argument
669 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { in tdp_mmu_iter_cond_resched()
671 kvm_flush_remote_tlbs(kvm); in tdp_mmu_iter_cond_resched()
676 cond_resched_rwlock_read(&kvm->mmu_lock); in tdp_mmu_iter_cond_resched()
678 cond_resched_rwlock_write(&kvm->mmu_lock); in tdp_mmu_iter_cond_resched()
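
The cond-resched hits capture the yield protocol for long walks: if a reschedule is needed or the rwlock is contended, flush the TLBs if the caller still owes a flush, leave the RCU read-side section, yield the lock in whichever mode it was taken, and mark the iterator as having yielded so the caller restarts the walk. A sketch; the tdp_iter fields (yielded, yielded_gfn, next_last_level_gfn) are assumed from the upstream iterator:

static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
							  struct tdp_iter *iter,
							  bool flush, bool shared)
{
	/* Assumed guard: don't yield again without making forward progress. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		if (flush)
			kvm_flush_remote_tlbs(kvm);

		rcu_read_unlock();

		if (shared)
			cond_resched_rwlock_read(&kvm->mmu_lock);
		else
			cond_resched_rwlock_write(&kvm->mmu_lock);

		rcu_read_lock();

		iter->yielded = true;
	}

	return iter->yielded;
}
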
701 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in __tdp_mmu_zap_root() argument
711 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) in __tdp_mmu_zap_root()
721 tdp_mmu_iter_set_spte(kvm, &iter, 0); in __tdp_mmu_zap_root()
722 else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0)) in __tdp_mmu_zap_root()
727 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_root() argument
743 kvm_lockdep_assert_mmu_lock_held(kvm, shared); in tdp_mmu_zap_root()
757 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G); in tdp_mmu_zap_root()
758 __tdp_mmu_zap_root(kvm, root, shared, root->role.level); in tdp_mmu_zap_root()
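
The zap-root hits show the root being torn down in two passes over the same tree: the first pass zaps at the 1GiB level so that large subtrees are freed in smaller batches, the second pass then clears the remaining entries at the root's own level. A sketch, with the RCU read-side section around the walk assumed:

static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			     bool shared)
{
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	/*
	 * Two passes: zapping at PG_LEVEL_1G first bounds how much work (and
	 * how many RCU-freed pages) any single SPTE clear can generate; the
	 * second pass removes whatever is left directly under the root.
	 */
	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);

	rcu_read_unlock();
}
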
763 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_tdp_mmu_zap_sp() argument
778 tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0, in kvm_tdp_mmu_zap_sp()
791 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_leafs() argument
798 lockdep_assert_held_write(&kvm->mmu_lock); in tdp_mmu_zap_leafs()
804 tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) { in tdp_mmu_zap_leafs()
813 tdp_mmu_iter_set_spte(kvm, &iter, 0); in tdp_mmu_zap_leafs()
831 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush) in kvm_tdp_mmu_zap_leafs() argument
835 for_each_tdp_mmu_root_yield_safe(kvm, root, false) in kvm_tdp_mmu_zap_leafs()
836 flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush); in kvm_tdp_mmu_zap_leafs()
841 void kvm_tdp_mmu_zap_all(struct kvm *kvm) in kvm_tdp_mmu_zap_all() argument
857 for_each_tdp_mmu_root_yield_safe(kvm, root, false) in kvm_tdp_mmu_zap_all()
858 tdp_mmu_zap_root(kvm, root, false); in kvm_tdp_mmu_zap_all()
865 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) in kvm_tdp_mmu_zap_invalidated_roots() argument
869 read_lock(&kvm->mmu_lock); in kvm_tdp_mmu_zap_invalidated_roots()
871 for_each_tdp_mmu_root_yield_safe(kvm, root, true) { in kvm_tdp_mmu_zap_invalidated_roots()
876 KVM_BUG_ON(!root->role.invalid, kvm); in kvm_tdp_mmu_zap_invalidated_roots()
887 tdp_mmu_zap_root(kvm, root, true); in kvm_tdp_mmu_zap_invalidated_roots()
894 kvm_tdp_mmu_put_root(kvm, root, true); in kvm_tdp_mmu_zap_invalidated_roots()
897 read_unlock(&kvm->mmu_lock); in kvm_tdp_mmu_zap_invalidated_roots()
910 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm) in kvm_tdp_mmu_invalidate_all_roots() argument
926 refcount_read(&kvm->users_count) && kvm->created_vcpus) in kvm_tdp_mmu_invalidate_all_roots()
927 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_invalidate_all_roots()
934 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) { in kvm_tdp_mmu_invalidate_all_roots()
973 else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte)) in tdp_mmu_map_handle_target_level()
977 kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level); in tdp_mmu_map_handle_target_level()
1015 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_link_sp() argument
1022 ret = tdp_mmu_set_spte_atomic(kvm, iter, spte); in tdp_mmu_link_sp()
1026 tdp_mmu_iter_set_spte(kvm, iter, spte); in tdp_mmu_link_sp()
1029 tdp_account_mmu_page(kvm, sp); in tdp_mmu_link_sp()
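
The link-sp hits show the split between the shared and exclusive install paths: a shared caller must install the new non-leaf SPTE with the atomic (cmpxchg-based) setter and bail on contention, an exclusive caller can write it directly, and either way the new page is accounted afterwards. A sketch; the make_nonleaf_spte()/kvm_ad_enabled() construction of the SPTE is assumed from the upstream file:

static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
			   struct kvm_mmu_page *sp, bool shared)
{
	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
	int ret = 0;

	if (shared) {
		/* May fail if another walker changed the SPTE under us. */
		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
		if (ret)
			return ret;
	} else {
		tdp_mmu_iter_set_spte(kvm, iter, spte);
	}

	tdp_account_mmu_page(kvm, sp);

	return 0;
}
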
1034 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1044 struct kvm *kvm = vcpu->kvm; in kvm_tdp_mmu_map() local
1086 r = tdp_mmu_split_huge_page(kvm, &iter, sp, true); in kvm_tdp_mmu_map()
1088 r = tdp_mmu_link_sp(kvm, &iter, sp, true); in kvm_tdp_mmu_map()
1101 spin_lock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_map()
1103 track_possible_nx_huge_page(kvm, sp); in kvm_tdp_mmu_map()
1104 spin_unlock(&kvm->arch.tdp_mmu_pages_lock); in kvm_tdp_mmu_map()
1123 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, in kvm_tdp_mmu_unmap_gfn_range() argument
1128 __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false) in kvm_tdp_mmu_unmap_gfn_range()
1129 flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end, in kvm_tdp_mmu_unmap_gfn_range()
1135 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1138 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, in kvm_tdp_mmu_handle_gfn() argument
1150 for_each_tdp_mmu_root(kvm, root, range->slot->as_id) { in kvm_tdp_mmu_handle_gfn()
1154 ret |= handler(kvm, &iter, range); in kvm_tdp_mmu_handle_gfn()
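
The handler hits show the dispatch pattern for the gfn-range hooks: a tdp_handler_t callback is run on every leaf SPTE in the range, for every root in the slot's address space, with the results OR'ed together. A sketch; the leaf-PTE iteration macro name and the RCU read-side section are assumed from the upstream iterator:

static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
						   struct kvm_gfn_range *range,
						   tdp_handler_t handler)
{
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	/* These handlers must not block, so there is no cond_resched here. */
	for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
		rcu_read_lock();

		tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
			ret |= handler(kvm, &iter, range);

		rcu_read_unlock();
	}

	return ret;
}
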
1170 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, in age_gfn_range() argument
1204 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_tdp_mmu_age_gfn_range() argument
1206 return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range); in kvm_tdp_mmu_age_gfn_range()
1209 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter, in test_age_gfn() argument
1215 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_tdp_mmu_test_age_gfn() argument
1217 return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn); in kvm_tdp_mmu_test_age_gfn()
1220 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, in set_spte_gfn() argument
1238 tdp_mmu_iter_set_spte(kvm, iter, 0); in set_spte_gfn()
1244 tdp_mmu_iter_set_spte(kvm, iter, new_spte); in set_spte_gfn()
1256 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_tdp_mmu_set_spte_gfn() argument
1263 return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn); in kvm_tdp_mmu_set_spte_gfn()
1271 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in wrprot_gfn_range() argument
1284 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in wrprot_gfn_range()
1294 if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) in wrprot_gfn_range()
1309 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, in kvm_tdp_mmu_wrprot_slot() argument
1315 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_tdp_mmu_wrprot_slot()
1317 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) in kvm_tdp_mmu_wrprot_slot()
1318 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_wrprot_slot()
1343 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm, in tdp_mmu_alloc_sp_for_split() argument
1365 read_unlock(&kvm->mmu_lock); in tdp_mmu_alloc_sp_for_split()
1367 write_unlock(&kvm->mmu_lock); in tdp_mmu_alloc_sp_for_split()
1373 read_lock(&kvm->mmu_lock); in tdp_mmu_alloc_sp_for_split()
1375 write_lock(&kvm->mmu_lock); in tdp_mmu_alloc_sp_for_split()
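
The alloc-for-split hits show why both unlock and lock appear in both lock modes: splitting huge pages outside of a vCPU has no pre-filled caches, so when an opportunistic (non-sleeping) allocation fails, mmu_lock is dropped in whichever mode it was taken, the shadow page is allocated with a sleeping allocation, and the lock is re-taken before the walk restarts. A sketch; the __tdp_mmu_alloc_sp_for_split() helper, its gfp-only signature, and the iter->yielded restart flag are assumptions here:

static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
						       struct tdp_iter *iter,
						       bool shared)
{
	struct kvm_mmu_page *sp;

	/* Try without sleeping so the lock does not have to be dropped. */
	sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
	if (sp)
		return sp;

	rcu_read_unlock();

	if (shared)
		read_unlock(&kvm->mmu_lock);
	else
		write_unlock(&kvm->mmu_lock);

	/* Dropping the lock invalidates the walk; tell the caller to restart. */
	iter->yielded = true;

	sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);

	if (shared)
		read_lock(&kvm->mmu_lock);
	else
		write_lock(&kvm->mmu_lock);

	rcu_read_lock();

	return sp;
}
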
1383 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_split_huge_page() argument
1395 sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i); in tdp_mmu_split_huge_page()
1405 ret = tdp_mmu_link_sp(kvm, iter, sp, shared); in tdp_mmu_split_huge_page()
1414 kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE); in tdp_mmu_split_huge_page()
1421 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm, in tdp_mmu_split_huge_pages_root() argument
1445 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared)) in tdp_mmu_split_huge_pages_root()
1452 sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared); in tdp_mmu_split_huge_pages_root()
1467 if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared)) in tdp_mmu_split_huge_pages_root()
1490 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, in kvm_tdp_mmu_try_split_huge_pages() argument
1498 kvm_lockdep_assert_mmu_lock_held(kvm, shared); in kvm_tdp_mmu_try_split_huge_pages()
1500 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) { in kvm_tdp_mmu_try_split_huge_pages()
1501 r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared); in kvm_tdp_mmu_try_split_huge_pages()
1503 kvm_tdp_mmu_put_root(kvm, root, shared); in kvm_tdp_mmu_try_split_huge_pages()
1526 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_gfn_range() argument
1538 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in clear_dirty_gfn_range()
1550 if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit)) in clear_dirty_gfn_range()
1567 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, in kvm_tdp_mmu_clear_dirty_slot() argument
1573 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_tdp_mmu_clear_dirty_slot()
1575 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) in kvm_tdp_mmu_clear_dirty_slot()
1576 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_clear_dirty_slot()
1589 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_pt_masked() argument
1596 lockdep_assert_held_write(&kvm->mmu_lock); in clear_dirty_pt_masked()
1637 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, in kvm_tdp_mmu_clear_dirty_pt_masked() argument
1644 for_each_tdp_mmu_root(kvm, root, slot->as_id) in kvm_tdp_mmu_clear_dirty_pt_masked()
1645 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); in kvm_tdp_mmu_clear_dirty_pt_masked()
1648 static void zap_collapsible_spte_range(struct kvm *kvm, in zap_collapsible_spte_range() argument
1661 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) in zap_collapsible_spte_range()
1686 max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot, in zap_collapsible_spte_range()
1692 if (tdp_mmu_zap_spte_atomic(kvm, &iter)) in zap_collapsible_spte_range()
1703 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, in kvm_tdp_mmu_zap_collapsible_sptes() argument
1708 lockdep_assert_held_read(&kvm->mmu_lock); in kvm_tdp_mmu_zap_collapsible_sptes()
1710 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) in kvm_tdp_mmu_zap_collapsible_sptes()
1711 zap_collapsible_spte_range(kvm, root, slot); in kvm_tdp_mmu_zap_collapsible_sptes()
1719 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, in write_protect_gfn() argument
1741 tdp_mmu_iter_set_spte(kvm, &iter, new_spte); in write_protect_gfn()
1755 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, in kvm_tdp_mmu_write_protect_gfn() argument
1762 lockdep_assert_held_write(&kvm->mmu_lock); in kvm_tdp_mmu_write_protect_gfn()
1763 for_each_tdp_mmu_root(kvm, root, slot->as_id) in kvm_tdp_mmu_write_protect_gfn()
1764 spte_set |= write_protect_gfn(kvm, root, gfn, min_level); in kvm_tdp_mmu_write_protect_gfn()