Searched refs:walk_mmu (Results 1 – 9 of 9) sorted by relevance
143 return vcpu->arch.walk_mmu->pdptrs[index]; in kvm_pdptr_read()
148 vcpu->arch.walk_mmu->pdptrs[index] = value; in kvm_pdptr_write()
185 return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; in mmu_is_nested()
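For context, the three hits above are the bodies of small inline helpers. The sketch below is reconstructed only from the snippets shown here; the exact signatures and the PDPTR register-cache refresh done by the real kvm_pdptr_read() are simplified assumptions.

/* Sketch reconstructed from the hits above; signatures and the register-cache
 * handling in the real helpers are assumptions. */
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	/* PDPTRs are cached on the MMU used to walk guest page tables. */
	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	/* Nested paging is in use when walk_mmu points at the nested MMU. */
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}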
803 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
868 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in load_pdptrs()
7320 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_read()
7330 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_write()
7342 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_system()
7351 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_read_guest_virt_helper()
7384 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fetch_guest_virt()
7443 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_write_guest_virt_helper()
7547 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vcpu_mmio_gva_to_gpa()
7557 !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
[all …]
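Most of these hits follow the same pattern: grab walk_mmu and hand a guest virtual address to its gva_to_gpa() callback. A minimal sketch of that pattern, assuming the kvm_mmu::gva_to_gpa callback shape found in recent kernels; the function name is a placeholder, and the real helpers additionally derive the access bits from the current CPL.

/* Placeholder name; shows only how walk_mmu is consumed for translation. */
static gpa_t sketch_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u64 access,
				    struct x86_exception *exception)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	/* access carries PFERR_* bits, e.g. PFERR_USER_MASK for CPL 3 reads. */
	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
}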
518 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; in FNAME()
881 WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu); in FNAME()
5852 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL); in kvm_mmu_invlpg()
6038 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in kvm_mmu_create()
103 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_svm_init_mmu_context()
109 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in nested_svm_uninit_mmu_context()
783 struct kvm_mmu *walk_mmu; member
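The member hit above is the walk_mmu pointer itself. From the other hits (&vcpu->arch.root_mmu, &vcpu->arch.nested_mmu are both taken by address) the surrounding layout can be sketched as below; the field order, the comments, and anything beyond these three members are assumptions.

struct kvm_vcpu_arch {
	/* ... */
	/* MMU used to translate guest virtual addresses (guest page-table walks). */
	struct kvm_mmu *walk_mmu;
	/* Default target of walk_mmu, installed in kvm_mmu_create(). */
	struct kvm_mmu root_mmu;
	/* Target of walk_mmu while nested paging (nSVM NPT / nVMX EPT) is active. */
	struct kvm_mmu nested_mmu;
	/* ... */
};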
433 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_ept_init_mmu_context()
439 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; in nested_ept_uninit_mmu_context()
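The nSVM hits above and these nEPT hits flip the same pointer: init points walk_mmu at the nested MMU, uninit restores the root MMU default from kvm_mmu_create(). A reduced sketch of that pattern follows; the function names are placeholders, and the real init paths also configure the nested MMU's callbacks, which is omitted here.

/* Placeholder names; only the pointer swap is taken from the hits above. */
static void nested_init_walk_mmu(struct kvm_vcpu *vcpu)
{
	/* While L2 runs behind nested paging, guest page-table walks go
	 * through the nested MMU. */
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_uninit_walk_mmu(struct kvm_vcpu *vcpu)
{
	/* Restore the default installed by kvm_mmu_create(). */
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}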
3242 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vmx_ept_load_pdptrs()
3257 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_save_pdptrs()
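These two vmx.c hits treat walk_mmu->pdptrs as the software cache that is kept in sync with the VMCS guest PDPTR fields when EPT is on and the guest uses PAE paging. A hedged sketch of the save direction only; the name is a placeholder, GUEST_PDPTR0..3 and vmcs_read64() are real VMX definitions, and the PAE check plus register-availability bookkeeping done by the real function are omitted.

/* Placeholder name; sketches only the walk_mmu->pdptrs <- VMCS copy. */
static void sketch_save_guest_pdptrs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
	mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
	mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
	mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
}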