Lines Matching refs:invalid_list

1887 				     struct list_head *invalid_list);
1889 struct list_head *invalid_list);
1982 struct list_head *invalid_list) in kvm_sync_page() argument
1987 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in kvm_sync_page()
1992 struct list_head *invalid_list, in kvm_mmu_remote_flush_or_zap() argument
1995 if (!remote_flush && list_empty(invalid_list)) in kvm_mmu_remote_flush_or_zap()
1998 if (!list_empty(invalid_list)) in kvm_mmu_remote_flush_or_zap()
1999 kvm_mmu_commit_zap_page(kvm, invalid_list); in kvm_mmu_remote_flush_or_zap()
2094 LIST_HEAD(invalid_list); in mmu_sync_children()
2104 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true); in mmu_sync_children()
2110 flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0; in mmu_sync_children()
2114 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush); in mmu_sync_children()
2125 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush); in mmu_sync_children()
2154 LIST_HEAD(invalid_list); in kvm_mmu_find_shadow_page()
2174 &invalid_list); in kvm_mmu_find_shadow_page()
2198 ret = kvm_sync_page(vcpu, sp, &invalid_list); in kvm_mmu_find_shadow_page()
2202 WARN_ON_ONCE(!list_empty(&invalid_list)); in kvm_mmu_find_shadow_page()
2216 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_find_shadow_page()
2488 u64 *spte, struct list_head *invalid_list) in mmu_page_zap_pte() argument
2506 if (tdp_enabled && invalid_list && in mmu_page_zap_pte()
2509 invalid_list); in mmu_page_zap_pte()
2519 struct list_head *invalid_list) in kvm_mmu_page_unlink_children() argument
2525 zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list); in kvm_mmu_page_unlink_children()
2541 struct list_head *invalid_list) in mmu_zap_unsync_children() argument
2554 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_zap_unsync_children()
2565 struct list_head *invalid_list, in __kvm_mmu_prepare_zap_page() argument
2573 *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); in __kvm_mmu_prepare_zap_page()
2574 *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list); in __kvm_mmu_prepare_zap_page()
2595 list_add(&sp->link, invalid_list); in __kvm_mmu_prepare_zap_page()
2597 list_move(&sp->link, invalid_list); in __kvm_mmu_prepare_zap_page()
2629 struct list_head *invalid_list) in kvm_mmu_prepare_zap_page() argument
2633 __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped); in kvm_mmu_prepare_zap_page()
2638 struct list_head *invalid_list) in kvm_mmu_commit_zap_page() argument
2642 if (list_empty(invalid_list)) in kvm_mmu_commit_zap_page()
2656 list_for_each_entry_safe(sp, nsp, invalid_list, link) { in kvm_mmu_commit_zap_page()
2667 LIST_HEAD(invalid_list); in kvm_mmu_zap_oldest_mmu_pages()
2683 unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, in kvm_mmu_zap_oldest_mmu_pages()
2693 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_zap_oldest_mmu_pages()
2754 LIST_HEAD(invalid_list); in kvm_mmu_unprotect_page()
2761 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); in kvm_mmu_unprotect_page()
2763 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_unprotect_page()
3539 struct list_head *invalid_list) in mmu_free_root_page() argument
3553 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_free_root_page()
3563 LIST_HEAD(invalid_list); in kvm_mmu_free_roots()
3589 &invalid_list); in kvm_mmu_free_roots()
3595 mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list); in kvm_mmu_free_roots()
3602 &invalid_list); in kvm_mmu_free_roots()
3610 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_free_roots()
5652 LIST_HEAD(invalid_list); in kvm_mmu_track_write()
5673 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); in kvm_mmu_track_write()
5692 kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush); in kvm_mmu_track_write()
6665 LIST_HEAD(invalid_list); in kvm_mmu_zap_all()
6673 if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) in kvm_mmu_zap_all()
6679 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_zap_all()
6734 LIST_HEAD(invalid_list); in mmu_shrink_scan()
7017 LIST_HEAD(invalid_list); in kvm_recover_nx_huge_pages()
7084 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); in kvm_recover_nx_huge_pages()
7088 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); in kvm_recover_nx_huge_pages()
7097 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); in kvm_recover_nx_huge_pages()
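
The references above all follow the same two-phase pattern: a caller declares an on-stack invalid_list, queues shadow pages onto it with kvm_mmu_prepare_zap_page() (or the __kvm_mmu_prepare_zap_page() variant that also reports nr_zapped), and then frees everything queued in one batch with kvm_mmu_commit_zap_page(), either directly or via kvm_mmu_remote_flush_or_zap(). The sketch below only illustrates that pattern; it is not code from mmu.c. The wrapper function zap_one_page_example() is hypothetical, and it assumes it is placed in mmu.c with the caller already holding kvm->mmu_lock, as the real callers listed above do.

/*
 * Minimal sketch of the prepare/commit zap pattern, assuming mmu_lock is
 * held by the caller.  kvm_mmu_prepare_zap_page() and
 * kvm_mmu_commit_zap_page() are the mmu.c functions referenced above;
 * zap_one_page_example() itself is hypothetical.
 */
static void zap_one_page_example(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	LIST_HEAD(invalid_list);	/* on-stack list of pages queued for zapping */

	/* Phase 1: unlink sp (and any unsync children) and queue it. */
	kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);

	/* Phase 2: flush TLBs and free everything queued on invalid_list. */
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
}

Paths that may also need a remote TLB flush even when nothing was queued (mmu_sync_children(), kvm_mmu_track_write(), kvm_recover_nx_huge_pages() above) instead pass the list to kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush), which commits the queued pages when the list is non-empty and otherwise flushes remote TLBs if flush was requested.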