Lines matching refs: arch (each entry shows the source line number, the matching line, and the containing function)

252 		.efer = vcpu->arch.efer,  in vcpu_to_role_regs()
646 return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct; in is_tdp_mmu_active()
688 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, in mmu_topup_memory_caches()
692 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache, in mmu_topup_memory_caches()
697 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache, in mmu_topup_memory_caches()
702 return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, in mmu_topup_memory_caches()
708 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache); in mmu_free_memory_caches()
709 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache); in mmu_free_memory_caches()
710 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache); in mmu_free_memory_caches()
711 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache); in mmu_free_memory_caches()
795 return &slot->arch.lpage_info[level - 2][idx]; in lpage_info_slot()
827 kvm->arch.indirect_shadow_pages++; in account_shadowed()
857 &kvm->arch.possible_nx_huge_pages); in track_possible_nx_huge_page()
875 kvm->arch.indirect_shadow_pages--; in unaccount_shadowed()
1086 return &slot->arch.rmap[level - PG_LEVEL_4K][idx]; in gfn_to_rmap()
1662 struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache; in rmap_add()
1715 kvm->arch.n_used_mmu_pages += nr; in kvm_mod_used_mmu_pages()
1909 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
1914 union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role; in kvm_sync_page_check()
1938 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte || in kvm_sync_page_check()
1950 return vcpu->arch.mmu->sync_spte(vcpu, sp, i); in kvm_sync_spte()
2012 unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); in is_obsolete_sp()
2252 sp->mmu_valid_gen = kvm->arch.mmu_valid_gen; in kvm_mmu_alloc_shadow_page()
2253 list_add(&sp->link, &kvm->arch.active_mmu_pages); in kvm_mmu_alloc_shadow_page()
2276 sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]; in __kvm_mmu_get_shadow_page()
2293 .page_header_cache = &vcpu->arch.mmu_page_header_cache, in kvm_mmu_get_shadow_page()
2294 .shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache, in kvm_mmu_get_shadow_page()
2295 .shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache, in kvm_mmu_get_shadow_page()
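For context, the three cache references at 2293-2295 are how the per-vCPU caches get plumbed into shadow-page allocation. A sketch of the wrapper, assuming the struct shadow_page_caches layout of recent kernels; the inner helper's exact argument order varies across versions:

static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
						    gfn_t gfn,
						    union kvm_mmu_page_role role)
{
	struct shadow_page_caches caches = {
		.page_header_cache = &vcpu->arch.mmu_page_header_cache,
		.shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
		.shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache,
	};

	return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
}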
2366 iterator->level = vcpu->arch.mmu->root_role.level; in shadow_walk_init_using_root()
2369 vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL && in shadow_walk_init_using_root()
2370 !vcpu->arch.mmu->root_role.direct) in shadow_walk_init_using_root()
2378 BUG_ON(root != vcpu->arch.mmu->root.hpa); in shadow_walk_init_using_root()
2381 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; in shadow_walk_init_using_root()
2392 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa, in shadow_walk_init()
2461 __link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true); in link_shadow_page()
2671 if (list_empty(&kvm->arch.active_mmu_pages)) in kvm_mmu_zap_oldest_mmu_pages()
2675 list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) { in kvm_mmu_zap_oldest_mmu_pages()
2701 if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages) in kvm_mmu_available_pages()
2702 return kvm->arch.n_max_mmu_pages - in kvm_mmu_available_pages()
2703 kvm->arch.n_used_mmu_pages; in kvm_mmu_available_pages()
2739 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { in kvm_mmu_change_mmu_pages()
2740 kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages - in kvm_mmu_change_mmu_pages()
2743 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; in kvm_mmu_change_mmu_pages()
2746 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; in kvm_mmu_change_mmu_pages()
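The n_used/n_max references at 2701-2746 implement the per-VM shadow-page budget: a headroom query plus the resize path that zaps the oldest pages when the limit shrinks. A condensed sketch, assuming mmu_lock is taken as a write lock as in current kernels:

static unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages - kvm->arch.n_used_mmu_pages;

	return 0;
}

void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{
	write_lock(&kvm->mmu_lock);

	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
		/* Evict the oldest shadow pages until the new ceiling is met. */
		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
						  goal_nr_mmu_pages);
		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
	}

	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;

	write_unlock(&kvm->mmu_lock);
}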
2774 if (vcpu->arch.mmu->root_role.direct) in kvm_mmu_unprotect_page_virt()
2838 spin_lock(&kvm->arch.mmu_unsync_pages_lock); in mmu_try_to_unsync_pages()
2856 spin_unlock(&kvm->arch.mmu_unsync_pages_lock); in mmu_try_to_unsync_pages()
3645 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role; in mmu_alloc_root()
3662 struct kvm_mmu *mmu = vcpu->arch.mmu; in mmu_alloc_direct_roots()
3761 smp_store_release(&kvm->arch.shadow_root_allocated, true); in mmu_first_shadow_root_alloc()
3770 struct kvm_mmu *mmu = vcpu->arch.mmu; in mmu_alloc_shadow_roots()
3888 struct kvm_mmu *mmu = vcpu->arch.mmu; in mmu_alloc_special_roots()
3998 if (vcpu->arch.mmu->root_role.direct) in kvm_mmu_sync_roots()
4001 if (!VALID_PAGE(vcpu->arch.mmu->root.hpa)) in kvm_mmu_sync_roots()
4006 if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) { in kvm_mmu_sync_roots()
4007 hpa_t root = vcpu->arch.mmu->root.hpa; in kvm_mmu_sync_roots()
4023 hpa_t root = vcpu->arch.mmu->pae_root[i]; in kvm_mmu_sync_roots()
4040 if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa)) in kvm_mmu_sync_prev_roots()
4044 kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free); in kvm_mmu_sync_prev_roots()
4129 rsvd_check = &vcpu->arch.mmu->shadow_zero_check; in get_mmio_spte()
4213 u32 id = vcpu->arch.apf.id; in alloc_apf_token()
4216 vcpu->arch.apf.id = 1; in alloc_apf_token()
4218 return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; in alloc_apf_token()
4224 struct kvm_arch_async_pf arch; in kvm_arch_setup_async_pf() local
4226 arch.token = alloc_apf_token(vcpu); in kvm_arch_setup_async_pf()
4227 arch.gfn = gfn; in kvm_arch_setup_async_pf()
4228 arch.direct_map = vcpu->arch.mmu->root_role.direct; in kvm_arch_setup_async_pf()
4229 arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu); in kvm_arch_setup_async_pf()
4232 kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); in kvm_arch_setup_async_pf()
4239 if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) || in kvm_arch_async_page_ready()
4247 if (!vcpu->arch.mmu->root_role.direct && in kvm_arch_async_page_ready()
4248 work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu)) in kvm_arch_async_page_ready()
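The async page fault references at 4213-4248 show how a token is minted and packed into the per-request struct kvm_arch_async_pf, and how the ready-side handler checks that the vCPU still uses the same MMU mode and guest CR3. A sketch of the setup side, assuming recent helper names (kvm_setup_async_pf(), kvm_mmu_get_guest_pgd()); the signature and any extra fields such as the error code vary by kernel version:

static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
{
	/* Skip over the id that would produce token 0 ("no token"). */
	u32 id = vcpu->arch.apf.id;

	if (id << 12 == 0)
		vcpu->arch.apf.id = 1;

	/* High bits carry the per-vCPU sequence id, low bits the vcpu_id. */
	return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
}

bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, gfn_t gfn)
{
	struct kvm_arch_async_pf arch;

	arch.token = alloc_apf_token(vcpu);
	arch.gfn = gfn;
	arch.direct_map = vcpu->arch.mmu->root_role.direct;
	arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);

	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}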
4343 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa); in is_page_fault_stale()
4369 if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) in direct_page_fault()
4417 u32 flags = vcpu->arch.apf.host_apf_flags; in kvm_handle_page_fault()
4425 vcpu->arch.l1tf_flush_l1d = true; in kvm_handle_page_fault()
4434 vcpu->arch.apf.host_apf_flags = 0; in kvm_handle_page_fault()
4621 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_mmu_new_pgd()
4657 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa); in kvm_mmu_new_pgd()
4787 vcpu->arch.reserved_gpa_bits, in reset_guest_rsvds_bits_mask()
4836 vcpu->arch.reserved_gpa_bits, execonly, in reset_rsvds_bits_mask_ept()
5200 struct kvm_mmu *context = &vcpu->arch.root_mmu; in init_kvm_tdp_mmu()
5251 struct kvm_mmu *context = &vcpu->arch.root_mmu; in kvm_init_shadow_mmu()
5276 struct kvm_mmu *context = &vcpu->arch.guest_mmu; in kvm_init_shadow_npt_mmu()
5328 struct kvm_mmu *context = &vcpu->arch.guest_mmu; in kvm_init_shadow_ept_mmu()
5356 struct kvm_mmu *context = &vcpu->arch.root_mmu; in init_kvm_softmmu()
5368 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; in init_kvm_nested_mmu()
5432 vcpu->arch.root_mmu.root_role.word = 0; in kvm_mmu_after_set_cpuid()
5433 vcpu->arch.guest_mmu.root_role.word = 0; in kvm_mmu_after_set_cpuid()
5434 vcpu->arch.nested_mmu.root_role.word = 0; in kvm_mmu_after_set_cpuid()
5435 vcpu->arch.root_mmu.cpu_role.ext.valid = 0; in kvm_mmu_after_set_cpuid()
5436 vcpu->arch.guest_mmu.cpu_role.ext.valid = 0; in kvm_mmu_after_set_cpuid()
5437 vcpu->arch.nested_mmu.cpu_role.ext.valid = 0; in kvm_mmu_after_set_cpuid()
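The six stores at 5432-5437 zero every MMU's root_role and mark its cpu_role invalid so the roles are recomputed against the new CPUID on the next MMU context reset/load. In context, as a sketch; the surrounding sanity checks differ by kernel version:

void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	/*
	 * Invalidate all MMU roles so they are rebuilt with the new CPUID,
	 * which feeds reserved-bit and paging-capability calculations.
	 */
	vcpu->arch.root_mmu.root_role.word = 0;
	vcpu->arch.guest_mmu.root_role.word = 0;
	vcpu->arch.nested_mmu.root_role.word = 0;
	vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
	vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
	vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
}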
5458 r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct); in kvm_mmu_load()
5464 if (vcpu->arch.mmu->root_role.direct) in kvm_mmu_load()
5491 kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); in kvm_mmu_unload()
5492 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa)); in kvm_mmu_unload()
5493 kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); in kvm_mmu_unload()
5494 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa)); in kvm_mmu_unload()
5544 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu); in kvm_mmu_free_obsolete_roots()
5545 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu); in kvm_mmu_free_obsolete_roots()
5661 if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) in kvm_mmu_track_write()
5700 bool direct = vcpu->arch.mmu->root_role.direct; in kvm_mmu_page_fault()
5713 if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa))) in kvm_mmu_page_fault()
5743 if (vcpu->arch.mmu->root_role.direct && in kvm_mmu_page_fault()
5780 if (WARN_ON_ONCE(mmu != vcpu->arch.mmu)) in __kvm_mmu_invalidate_addr()
5813 if (mmu != &vcpu->arch.guest_mmu) { in kvm_mmu_invalidate_addr()
5846 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL); in kvm_mmu_invlpg()
5854 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_mmu_invpcid_gva()
5977 if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu) in __kvm_mmu_create()
6023 vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache; in kvm_mmu_create()
6024 vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO; in kvm_mmu_create()
6026 vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache; in kvm_mmu_create()
6027 vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO; in kvm_mmu_create()
6029 vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO; in kvm_mmu_create()
6031 vcpu->arch.mmu = &vcpu->arch.root_mmu; in kvm_mmu_create()
6032 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in kvm_mmu_create()
6034 ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu); in kvm_mmu_create()
6038 ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu); in kvm_mmu_create()
6044 free_mmu_pages(&vcpu->arch.guest_mmu); in kvm_mmu_create()
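The kvm_mmu_create() references at 6023-6044 set up the per-vCPU caches (only the pte_list_desc and page_header caches are backed by kmem_cache pools; the shadow-page cache hands out whole zeroed pages), point both mmu and walk_mmu at root_mmu, and then allocate the guest_mmu/root_mmu backing pages. A sketch of how those pieces fit together, assuming the unwind order implied by 6044:

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;

	/* Shadow pages are full pages, not kmem_cache objects. */
	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;

	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
	if (ret)
		return ret;

	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
	if (ret)
		goto fail_allocate_root;

	return ret;

fail_allocate_root:
	free_mmu_pages(&vcpu->arch.guest_mmu);
	return ret;
}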
6057 &kvm->arch.active_mmu_pages, link) { in kvm_zap_obsolete_pages()
6086 &kvm->arch.zapped_obsolete_pages, &nr_zapped); in kvm_zap_obsolete_pages()
6102 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages); in kvm_zap_obsolete_pages()
6128 kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1; in kvm_mmu_zap_all_fast()
6167 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); in kvm_has_zapped_obsolete_pages()
6172 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); in kvm_mmu_init_vm()
6173 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); in kvm_mmu_init_vm()
6174 INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages); in kvm_mmu_init_vm()
6175 spin_lock_init(&kvm->arch.mmu_unsync_pages_lock); in kvm_mmu_init_vm()
6180 kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache; in kvm_mmu_init_vm()
6181 kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO; in kvm_mmu_init_vm()
6183 kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO; in kvm_mmu_init_vm()
6185 kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache; in kvm_mmu_init_vm()
6186 kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO; in kvm_mmu_init_vm()
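kvm_mmu_init_vm() (6172-6186) mirrors the per-vCPU setup at VM scope: it initializes the page lists and the unsync lock, then prepares the three eager-page-splitting caches that are topped up lazily under slots_lock. A sketch; the placement of the TDP MMU init call and the return type are assumptions that track recent kernels:

void kvm_mmu_init_vm(struct kvm *kvm)
{
	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);

	if (tdp_mmu_enabled)
		kvm_mmu_init_tdp_mmu(kvm);

	/* Caches used when splitting huge pages for dirty logging. */
	kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
	kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;

	kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;

	kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
	kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
}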
6191 kvm_mmu_free_memory_cache(&kvm->arch.split_desc_cache); in mmu_free_vm_memory_caches()
6192 kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache); in mmu_free_vm_memory_caches()
6193 kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache); in mmu_free_vm_memory_caches()
6303 return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) || in need_topup_split_caches_or_resched()
6304 need_topup(&kvm->arch.split_page_header_cache, 1) || in need_topup_split_caches_or_resched()
6305 need_topup(&kvm->arch.split_shadow_page_cache, 1); in need_topup_split_caches_or_resched()
6329 r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity, in topup_split_caches()
6334 r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1); in topup_split_caches()
6338 return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1); in topup_split_caches()
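The split-cache references at 6303-6338 are the lazy top-up for eager page splitting: the caches are refilled outside mmu_lock whenever a split might run dry or the CPU should yield. A condensed sketch of the check-and-topup pair; need_topup(), SPLIT_DESC_CACHE_MIN_NR_OBJECTS and KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE are assumed to match recent kernels:

static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
{
	return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
}

static bool need_topup_split_caches_or_resched(struct kvm *kvm)
{
	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
		return true;

	/* Worst case: one split consumes SPLIT_DESC_CACHE_MIN_NR_OBJECTS descriptors. */
	return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) ||
	       need_topup(&kvm->arch.split_page_header_cache, 1) ||
	       need_topup(&kvm->arch.split_shadow_page_cache, 1);
}

static int topup_split_caches(struct kvm *kvm)
{
	const int capacity = SPLIT_DESC_CACHE_MIN_NR_OBJECTS +
			     KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE;
	int r;

	lockdep_assert_held(&kvm->slots_lock);

	r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity,
					 SPLIT_DESC_CACHE_MIN_NR_OBJECTS);
	if (r)
		return r;

	r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
	if (r)
		return r;

	return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
}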
6361 caches.page_header_cache = &kvm->arch.split_page_header_cache; in shadow_mmu_get_sp_for_split()
6362 caches.shadow_page_cache = &kvm->arch.split_shadow_page_cache; in shadow_mmu_get_sp_for_split()
6373 struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache; in shadow_mmu_split_huge_page()
6670 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { in kvm_mmu_zap_all()
6750 if (!kvm->arch.n_used_mmu_pages && in mmu_shrink_scan()
6759 &kvm->arch.zapped_obsolete_pages); in mmu_shrink_scan()
6860 wake_up_process(kvm->arch.nx_huge_page_recovery_thread); in set_nx_huge_pages()
6941 free_mmu_pages(&vcpu->arch.root_mmu); in kvm_mmu_destroy()
6942 free_mmu_pages(&vcpu->arch.guest_mmu); in kvm_mmu_destroy()
7002 wake_up_process(kvm->arch.nx_huge_page_recovery_thread); in set_nx_huge_pages_recovery_param()
7034 if (list_empty(&kvm->arch.possible_nx_huge_pages)) in kvm_recover_nx_huge_pages()
7044 sp = list_first_entry(&kvm->arch.possible_nx_huge_pages, in kvm_recover_nx_huge_pages()
7150 &kvm->arch.nx_huge_page_recovery_thread); in kvm_mmu_post_init_vm()
7152 kthread_unpark(kvm->arch.nx_huge_page_recovery_thread); in kvm_mmu_post_init_vm()
7159 if (kvm->arch.nx_huge_page_recovery_thread) in kvm_mmu_pre_destroy_vm()
7160 kthread_stop(kvm->arch.nx_huge_page_recovery_thread); in kvm_mmu_pre_destroy_vm()
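Finally, the nx_huge_page_recovery_thread references (6860, 7002, 7150-7160) cover the recovery worker's lifecycle: the module-parameter setters wake it, kvm_mmu_post_init_vm() creates and unparks it, and kvm_mmu_pre_destroy_vm() stops it before the VM is torn down. The teardown side is small enough to sketch directly from the listed lines; the creation side uses a kernel-version-dependent worker-thread helper, so it is omitted here:

void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
{
	/* Stop the NX huge page recovery worker before VM teardown. */
	if (kvm->arch.nx_huge_page_recovery_thread)
		kthread_stop(kvm->arch.nx_huge_page_recovery_thread);
}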