Lines Matching refs:svm

85 static void avic_activate_vmcb(struct vcpu_svm *svm) in avic_activate_vmcb() argument
87 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_activate_vmcb()
101 if (x2avic_enabled && apic_x2apic_mode(svm->vcpu.arch.apic)) { in avic_activate_vmcb()
105 svm_set_x2apic_msr_interception(svm, false); in avic_activate_vmcb()
111 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu); in avic_activate_vmcb()
116 svm_set_x2apic_msr_interception(svm, true); in avic_activate_vmcb()
120 static void avic_deactivate_vmcb(struct vcpu_svm *svm) in avic_deactivate_vmcb() argument
122 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_deactivate_vmcb()
131 if (is_guest_mode(&svm->vcpu) && in avic_deactivate_vmcb()
132 vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)) in avic_deactivate_vmcb()
136 svm_set_x2apic_msr_interception(svm, true); in avic_deactivate_vmcb()
244 void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb) in avic_init_vmcb() argument
246 struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm); in avic_init_vmcb()
247 phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page)); in avic_init_vmcb()
256 if (kvm_apicv_activated(svm->vcpu.kvm)) in avic_init_vmcb()
257 avic_activate_vmcb(svm); in avic_init_vmcb()
259 avic_deactivate_vmcb(svm); in avic_init_vmcb()
281 struct vcpu_svm *svm = to_svm(vcpu); in avic_init_backing_page() local
304 svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs); in avic_init_backing_page()
311 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) & in avic_init_backing_page()
316 svm->avic_physical_id_cache = entry; in avic_init_backing_page()
492 struct vcpu_svm *svm = to_svm(vcpu); in avic_incomplete_ipi_interception() local
493 u32 icrh = svm->vmcb->control.exit_info_1 >> 32; in avic_incomplete_ipi_interception()
494 u32 icrl = svm->vmcb->control.exit_info_1; in avic_incomplete_ipi_interception()
495 u32 id = svm->vmcb->control.exit_info_2 >> 32; in avic_incomplete_ipi_interception()
496 u32 index = svm->vmcb->control.exit_info_2 & 0x1FF; in avic_incomplete_ipi_interception()
597 struct vcpu_svm *svm = to_svm(vcpu); in avic_invalidate_logical_id_entry() local
598 bool flat = svm->dfr_reg == APIC_DFR_FLAT; in avic_invalidate_logical_id_entry()
605 entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat); in avic_invalidate_logical_id_entry()
612 struct vcpu_svm *svm = to_svm(vcpu); in avic_handle_ldr_update() local
620 if (ldr == svm->ldr_reg) in avic_handle_ldr_update()
625 svm->ldr_reg = ldr; in avic_handle_ldr_update()
631 struct vcpu_svm *svm = to_svm(vcpu); in avic_handle_dfr_update() local
634 if (svm->dfr_reg == dfr) in avic_handle_dfr_update()
638 svm->dfr_reg = dfr; in avic_handle_dfr_update()
695 struct vcpu_svm *svm = to_svm(vcpu); in avic_unaccelerated_access_interception() local
697 u32 offset = svm->vmcb->control.exit_info_1 & in avic_unaccelerated_access_interception()
699 u32 vector = svm->vmcb->control.exit_info_2 & in avic_unaccelerated_access_interception()
701 bool write = (svm->vmcb->control.exit_info_1 >> 32) & in avic_unaccelerated_access_interception()
719 int avic_init_vcpu(struct vcpu_svm *svm) in avic_init_vcpu() argument
722 struct kvm_vcpu *vcpu = &svm->vcpu; in avic_init_vcpu()
731 INIT_LIST_HEAD(&svm->ir_list); in avic_init_vcpu()
732 spin_lock_init(&svm->ir_list_lock); in avic_init_vcpu()
733 svm->dfr_reg = APIC_DFR_FLAT; in avic_init_vcpu()
749 struct vcpu_svm *svm = to_svm(vcpu); in avic_set_pi_irte_mode() local
758 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_set_pi_irte_mode()
760 if (list_empty(&svm->ir_list)) in avic_set_pi_irte_mode()
763 list_for_each_entry(ir, &svm->ir_list, node) { in avic_set_pi_irte_mode()
772 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_set_pi_irte_mode()
776 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) in svm_ir_list_del() argument
781 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_del()
782 list_for_each_entry(cur, &svm->ir_list, node) { in svm_ir_list_del()
789 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_del()
792 static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) in svm_ir_list_add() argument
805 struct kvm *kvm = svm->vcpu.kvm; in svm_ir_list_add()
830 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_add()
838 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in svm_ir_list_add()
843 list_add(&ir->node, &svm->ir_list); in svm_ir_list_add()
844 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_add()
862 struct vcpu_data *vcpu_info, struct vcpu_svm **svm) in get_pi_vcpu_info() argument
878 *svm = to_svm(vcpu); in get_pi_vcpu_info()
879 vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page)); in get_pi_vcpu_info()
920 struct vcpu_svm *svm = NULL; in avic_pi_update_irte() local
932 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set && in avic_pi_update_irte()
933 kvm_vcpu_apicv_active(&svm->vcpu)) { in avic_pi_update_irte()
937 pi.base = __sme_set(page_to_phys(svm->avic_backing_page) & in avic_pi_update_irte()
940 svm->vcpu.vcpu_id); in avic_pi_update_irte()
953 svm_ir_list_add(svm, &pi); in avic_pi_update_irte()
983 if (!ret && svm) { in avic_pi_update_irte()
984 trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, in avic_pi_update_irte()
1006 struct vcpu_svm *svm = to_svm(vcpu); in avic_update_iommu_vcpu_affinity() local
1008 lockdep_assert_held(&svm->ir_list_lock); in avic_update_iommu_vcpu_affinity()
1017 if (list_empty(&svm->ir_list)) in avic_update_iommu_vcpu_affinity()
1020 list_for_each_entry(ir, &svm->ir_list, node) { in avic_update_iommu_vcpu_affinity()
1032 struct vcpu_svm *svm = to_svm(vcpu); in avic_vcpu_load() local
1057 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_vcpu_load()
1059 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_load()
1066 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_load()
1069 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_vcpu_load()
1075 struct vcpu_svm *svm = to_svm(vcpu); in avic_vcpu_put() local
1087 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_put()
1101 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_vcpu_put()
1106 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_put()
1108 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_vcpu_put()
1114 struct vcpu_svm *svm = to_svm(vcpu); in avic_refresh_virtual_apic_mode() local
1115 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_refresh_virtual_apic_mode()
1129 avic_activate_vmcb(svm); in avic_refresh_virtual_apic_mode()
1131 avic_deactivate_vmcb(svm); in avic_refresh_virtual_apic_mode()
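
The avic_vcpu_load()/avic_vcpu_put() hits above (lines 1032-1108) update the vCPU's physical APIC ID table entry through the cached pointer svm->avic_physical_id_cache with READ_ONCE()/WRITE_ONCE() while holding svm->ir_list_lock; broadly, load records the host CPU and sets the entry's IS_RUNNING bit, and put clears IS_RUNNING. The sketch below is a minimal, self-contained user-space model of only that bit manipulation, not the kernel implementation: the mask values and the model_* helper names are assumptions chosen to approximate the kernel's AVIC_PHYSICAL_ID_ENTRY_* constants (see asm/svm.h for the authoritative layout), and the real code also performs the locking and IOMMU bookkeeping visible in the listing.

/*
 * Hypothetical user-space model of the physical APIC ID table entry
 * updates performed by avic_vcpu_load()/avic_vcpu_put() in the listing.
 * Mask values approximate the kernel's AVIC_PHYSICAL_ID_ENTRY_* macros
 * and are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define HOST_PHYSICAL_ID_MASK   0xFFFULL                 /* bits 11:0  */
#define BACKING_PAGE_MASK       0x000FFFFFFFFFF000ULL    /* bits 51:12 */
#define IS_RUNNING_BIT          (1ULL << 62)
#define VALID_BIT               (1ULL << 63)

/* Model of the load path: record the host CPU and mark the vCPU running. */
static void model_vcpu_load(uint64_t *entry, unsigned int cpu)
{
	uint64_t e = *entry;

	e &= ~HOST_PHYSICAL_ID_MASK;
	e |= (cpu & HOST_PHYSICAL_ID_MASK);
	e |= IS_RUNNING_BIT;
	*entry = e;
}

/* Model of the put path: clear the running bit before the vCPU blocks. */
static void model_vcpu_put(uint64_t *entry)
{
	*entry &= ~IS_RUNNING_BIT;
}

int main(void)
{
	/* Entry roughly as avic_init_backing_page() would seed it:
	 * backing-page physical address plus the valid bit. */
	uint64_t entry = (0x123456000ULL & BACKING_PAGE_MASK) | VALID_BIT;

	model_vcpu_load(&entry, 3);
	printf("loaded: %#018llx\n", (unsigned long long)entry);

	model_vcpu_put(&entry);
	printf("put:    %#018llx\n", (unsigned long long)entry);
	return 0;
}

In the kernel listing, avic_update_iommu_vcpu_affinity() (line 1006) walks svm->ir_list under the same ir_list_lock from both paths, so the IOMMU's notion of where the vCPU is running is updated together with the entry shown here.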