Lines matching refs: svm (arch/x86/kvm/svm/svm.c)

299 	struct vcpu_svm *svm = to_svm(vcpu);  in svm_set_efer()  local
314 svm_set_gif(svm, true); in svm_set_efer()
317 clr_exception_intercept(svm, GP_VECTOR); in svm_set_efer()
325 svm_free_nested(svm); in svm_set_efer()
328 int ret = svm_allocate_nested(svm); in svm_set_efer()
340 set_exception_intercept(svm, GP_VECTOR); in svm_set_efer()
344 svm->vmcb->save.efer = efer | EFER_SVME; in svm_set_efer()
345 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_efer()
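
Note: taken together, the svm_set_efer() hits above show one invariant worth calling out: whatever EFER value is being written, the copy that lands in the VMCB always has EFER.SVME forced on, and the CR clean bit is then invalidated so hardware reloads the field. A minimal standalone sketch of that tail end (the struct and the clean-bit parameter are local stand-ins, not the kernel's definitions):

  #include <stdint.h>

  #define EFER_SVME (1ULL << 12)   /* architectural SVME bit in EFER */

  struct vmcb_stub {
          uint64_t efer;
          uint32_t clean_bits;
  };

  /* Mirror of the "efer | EFER_SVME" plus vmcb_mark_dirty() pair above:
   * force SVME on, then dirty the CR field group so the CPU does not
   * use a stale cached copy. */
  static void set_efer_sketch(struct vmcb_stub *vmcb, uint64_t efer,
                              uint32_t vmcb_cr_bit)
  {
          vmcb->efer = efer | EFER_SVME;
          vmcb->clean_bits &= ~(1U << vmcb_cr_bit);
  }
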
351 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_interrupt_shadow() local
354 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) in svm_get_interrupt_shadow()
361 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_interrupt_shadow() local
364 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
366 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
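
Note: the two interrupt-shadow accessors reduce to single-bit operations on int_state. A standalone sketch (the mask matches the architectural bit 0; the return convention is simplified relative to KVM's KVM_X86_SHADOW_INT_* flags):

  #include <stdint.h>

  #define SVM_INTERRUPT_SHADOW_MASK 0x1U   /* bit 0 of the guest int_state field */

  static int get_int_shadow_sketch(uint32_t int_state)
  {
          return !!(int_state & SVM_INTERRUPT_SHADOW_MASK);
  }

  static uint32_t set_int_shadow_sketch(uint32_t int_state, int mask)
  {
          int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
          if (mask)
                  int_state |= SVM_INTERRUPT_SHADOW_MASK;
          return int_state;
  }
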
375 struct vcpu_svm *svm = to_svm(vcpu); in __svm_skip_emulated_instruction() local
385 if (nrips && svm->vmcb->control.next_rip != 0) { in __svm_skip_emulated_instruction()
387 svm->next_rip = svm->vmcb->control.next_rip; in __svm_skip_emulated_instruction()
390 if (!svm->next_rip) { in __svm_skip_emulated_instruction()
400 old_rflags = svm->vmcb->save.rflags; in __svm_skip_emulated_instruction()
406 svm->vmcb->save.rflags = old_rflags; in __svm_skip_emulated_instruction()
408 kvm_rip_write(vcpu, svm->next_rip); in __svm_skip_emulated_instruction()
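
Note: __svm_skip_emulated_instruction() prefers the next_rip that hardware saved at #VMEXIT (available when the NextRIP feature, nrips, is present) and only falls back to software instruction emulation, restoring rflags afterwards so single-step state survives. The core decision in isolation (a hedged sketch; the emulation fallback is elided):

  #include <stdbool.h>
  #include <stdint.h>

  /* Returns true if RIP could be advanced from hardware-saved state,
   * false if the caller must decode and emulate the instruction. */
  static bool skip_insn_sketch(bool nrips, uint64_t next_rip, uint64_t *rip)
  {
          if (nrips && next_rip) {
                  *rip = next_rip;
                  return true;
          }
          return false;
  }
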
426 struct vcpu_svm *svm = to_svm(vcpu); in svm_update_soft_interrupt_rip() local
454 svm->soft_int_injected = true; in svm_update_soft_interrupt_rip()
455 svm->soft_int_csbase = svm->vmcb->save.cs.base; in svm_update_soft_interrupt_rip()
456 svm->soft_int_old_rip = old_rip; in svm_update_soft_interrupt_rip()
457 svm->soft_int_next_rip = rip; in svm_update_soft_interrupt_rip()
463 svm->vmcb->control.next_rip = rip; in svm_update_soft_interrupt_rip()
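
Note: svm_update_soft_interrupt_rip() squirrels away both RIPs plus CS.base before injecting a soft interrupt; svm_complete_soft_interrupt() (further down in this listing) uses them to re-derive next_rip or to roll RIP back if the injection did not complete. A compact sketch of the bookkeeping record, with field names paralleling the svm->soft_int_* members above:

  #include <stdbool.h>
  #include <stdint.h>

  /* State saved before injecting a software interrupt/exception so an
   * aborted injection can be unwound on the next VM-entry. */
  struct soft_int_state {
          bool     injected;
          uint64_t csbase;
          uint64_t old_rip;    /* RIP before skipping the INTn/INT3/INTO */
          uint64_t next_rip;   /* RIP after it */
  };

  static void record_soft_int(struct soft_int_state *s, uint64_t csbase,
                              uint64_t old_rip, uint64_t next_rip)
  {
          s->injected = true;
          s->csbase   = csbase;
          s->old_rip  = old_rip;
          s->next_rip = next_rip;
  }
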
471 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_exception() local
479 svm->vmcb->control.event_inj = ex->vector in svm_inject_exception()
483 svm->vmcb->control.event_inj_err = ex->error_code; in svm_inject_exception()
743 static void set_dr_intercepts(struct vcpu_svm *svm) in set_dr_intercepts() argument
745 struct vmcb *vmcb = svm->vmcb01.ptr; in set_dr_intercepts()
764 recalc_intercepts(svm); in set_dr_intercepts()
767 static void clr_dr_intercepts(struct vcpu_svm *svm) in clr_dr_intercepts() argument
769 struct vmcb *vmcb = svm->vmcb01.ptr; in clr_dr_intercepts()
773 recalc_intercepts(svm); in clr_dr_intercepts()
790 struct vcpu_svm *svm = to_svm(vcpu); in set_shadow_msr_intercept() local
798 set_bit(slot, svm->shadow_msr_intercept.read); in set_shadow_msr_intercept()
800 clear_bit(slot, svm->shadow_msr_intercept.read); in set_shadow_msr_intercept()
803 set_bit(slot, svm->shadow_msr_intercept.write); in set_shadow_msr_intercept()
805 clear_bit(slot, svm->shadow_msr_intercept.write); in set_shadow_msr_intercept()
844 struct vcpu_svm *svm = to_svm(vcpu); in set_msr_interception_bitmap() local
875 svm->nested.force_msr_bitmap_recalc = true; in set_msr_interception_bitmap()
911 void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept) in svm_set_x2apic_msr_interception() argument
915 if (intercept == svm->x2avic_msrs_intercepted) in svm_set_x2apic_msr_interception()
927 set_msr_interception(&svm->vcpu, svm->msrpm, index, in svm_set_x2apic_msr_interception()
931 svm->x2avic_msrs_intercepted = intercept; in svm_set_x2apic_msr_interception()
941 struct vcpu_svm *svm = to_svm(vcpu); in svm_msr_filter_changed() local
951 u32 read = test_bit(i, svm->shadow_msr_intercept.read); in svm_msr_filter_changed()
952 u32 write = test_bit(i, svm->shadow_msr_intercept.write); in svm_msr_filter_changed()
954 set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write); in svm_msr_filter_changed()
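
Note: the shadow_msr_intercept read/write bitmaps record what KVM itself wants, independent of the userspace MSR filter; svm_msr_filter_changed() then replays those desired bits through set_msr_interception_bitmap() whenever the filter changes. A self-contained model of that bookkeeping (sizes and helper names are demo stand-ins, not the MSRPM geometry):

  #include <stdbool.h>

  #define SLOTS 256   /* demo size only */

  struct shadow_bits {
          unsigned long read[SLOTS / (8 * sizeof(unsigned long))];
          unsigned long write[SLOTS / (8 * sizeof(unsigned long))];
  };

  static void assign_bit_demo(unsigned long *map, unsigned slot, bool set)
  {
          unsigned long bit = 1UL << (slot % (8 * sizeof(unsigned long)));
          unsigned idx = slot / (8 * sizeof(unsigned long));

          if (set)
                  map[idx] |= bit;
          else
                  map[idx] &= ~bit;
  }

  /* KVM's intent is remembered here; the live MSR permission bitmap is
   * recomputed from it when the userspace MSR filter changes. */
  static void shadow_set(struct shadow_bits *s, unsigned slot,
                         bool read, bool write)
  {
          assign_bit_demo(s->read, slot, read);
          assign_bit_demo(s->write, slot, write);
  }
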
1014 struct vcpu_svm *svm = to_svm(vcpu); in svm_enable_lbrv() local
1016 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; in svm_enable_lbrv()
1017 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); in svm_enable_lbrv()
1018 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); in svm_enable_lbrv()
1019 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); in svm_enable_lbrv()
1020 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); in svm_enable_lbrv()
1023 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1); in svm_enable_lbrv()
1027 svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr); in svm_enable_lbrv()
1032 struct vcpu_svm *svm = to_svm(vcpu); in svm_disable_lbrv() local
1036 svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; in svm_disable_lbrv()
1037 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); in svm_disable_lbrv()
1038 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); in svm_disable_lbrv()
1039 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); in svm_disable_lbrv()
1040 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0); in svm_disable_lbrv()
1047 svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb); in svm_disable_lbrv()
1050 static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm) in svm_get_lbr_vmcb() argument
1057 return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb : in svm_get_lbr_vmcb()
1058 svm->vmcb01.ptr; in svm_get_lbr_vmcb()
1063 struct vcpu_svm *svm = to_svm(vcpu); in svm_update_lbrv() local
1064 bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK; in svm_update_lbrv()
1065 bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) || in svm_update_lbrv()
1067 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)); in svm_update_lbrv()
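
Note: svm_get_lbr_vmcb() answers a single question: which VMCB currently owns the live LBR registers? If the active VMCB has LBR_CTL enabled, it does; otherwise the state still lives in vmcb01. Sketch under local stand-in types (the mask value is assumed, labeled as such):

  #include <stdint.h>

  #define LBR_CTL_ENABLE (1ULL << 0)   /* stand-in for LBR_CTL_ENABLE_MASK */

  struct vmcb_lbr_stub { uint64_t virt_ext; };

  static struct vmcb_lbr_stub *
  lbr_vmcb_sketch(struct vmcb_lbr_stub *active, struct vmcb_lbr_stub *vmcb01)
  {
          return (active->virt_ext & LBR_CTL_ENABLE) ? active : vmcb01;
  }
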
1078 void disable_nmi_singlestep(struct vcpu_svm *svm) in disable_nmi_singlestep() argument
1080 svm->nmi_singlestep = false; in disable_nmi_singlestep()
1082 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { in disable_nmi_singlestep()
1084 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in disable_nmi_singlestep()
1085 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; in disable_nmi_singlestep()
1086 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in disable_nmi_singlestep()
1087 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; in disable_nmi_singlestep()
1093 struct vcpu_svm *svm = to_svm(vcpu); in grow_ple_window() local
1094 struct vmcb_control_area *control = &svm->vmcb->control; in grow_ple_window()
1106 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in grow_ple_window()
1114 struct vcpu_svm *svm = to_svm(vcpu); in shrink_ple_window() local
1115 struct vmcb_control_area *control = &svm->vmcb->control; in shrink_ple_window()
1127 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in shrink_ple_window()
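
Note: grow_ple_window() and shrink_ple_window() drive pause_filter_count through shared helpers; on the grow side the pattern is multiply for small modifiers, add for large ones, clamp at the maximum. The following is a hedged reconstruction of that shape, not a verbatim copy of the kernel's __grow_ple_window():

  #include <stdint.h>

  static unsigned int grow_ple_sketch(unsigned int val, unsigned int base,
                                      unsigned int modifier, unsigned int max)
  {
          uint64_t ret = val;   /* widen so the multiply cannot overflow */

          if (modifier < 1)
                  return base;
          if (modifier < base)
                  ret *= modifier;
          else
                  ret += modifier;

          return ret < max ? (unsigned int)ret : max;
  }
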
1166 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_l2_tsc_offset() local
1168 return svm->nested.ctl.tsc_offset; in svm_get_l2_tsc_offset()
1173 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_l2_tsc_multiplier() local
1175 return svm->tsc_ratio_msr; in svm_get_l2_tsc_multiplier()
1180 struct vcpu_svm *svm = to_svm(vcpu); in svm_write_tsc_offset() local
1182 svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset; in svm_write_tsc_offset()
1183 svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset; in svm_write_tsc_offset()
1184 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in svm_write_tsc_offset()
1197 struct vcpu_svm *svm) in svm_recalc_instruction_intercepts() argument
1205 !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID)) in svm_recalc_instruction_intercepts()
1206 svm_set_intercept(svm, INTERCEPT_INVPCID); in svm_recalc_instruction_intercepts()
1208 svm_clr_intercept(svm, INTERCEPT_INVPCID); in svm_recalc_instruction_intercepts()
1213 svm_clr_intercept(svm, INTERCEPT_RDTSCP); in svm_recalc_instruction_intercepts()
1215 svm_set_intercept(svm, INTERCEPT_RDTSCP); in svm_recalc_instruction_intercepts()
1221 struct vcpu_svm *svm = to_svm(vcpu); in init_vmcb_after_set_cpuid() local
1229 svm_set_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb_after_set_cpuid()
1230 svm_set_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb_after_set_cpuid()
1231 svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; in init_vmcb_after_set_cpuid()
1233 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); in init_vmcb_after_set_cpuid()
1234 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); in init_vmcb_after_set_cpuid()
1241 svm_clr_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb_after_set_cpuid()
1242 svm_clr_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb_after_set_cpuid()
1243 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; in init_vmcb_after_set_cpuid()
1246 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1); in init_vmcb_after_set_cpuid()
1247 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1); in init_vmcb_after_set_cpuid()
1253 struct vcpu_svm *svm = to_svm(vcpu); in init_vmcb() local
1254 struct vmcb *vmcb = svm->vmcb01.ptr; in init_vmcb()
1258 svm_set_intercept(svm, INTERCEPT_CR0_READ); in init_vmcb()
1259 svm_set_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1260 svm_set_intercept(svm, INTERCEPT_CR4_READ); in init_vmcb()
1261 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); in init_vmcb()
1262 svm_set_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1263 svm_set_intercept(svm, INTERCEPT_CR4_WRITE); in init_vmcb()
1265 svm_set_intercept(svm, INTERCEPT_CR8_WRITE); in init_vmcb()
1267 set_dr_intercepts(svm); in init_vmcb()
1269 set_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1270 set_exception_intercept(svm, UD_VECTOR); in init_vmcb()
1271 set_exception_intercept(svm, MC_VECTOR); in init_vmcb()
1272 set_exception_intercept(svm, AC_VECTOR); in init_vmcb()
1273 set_exception_intercept(svm, DB_VECTOR); in init_vmcb()
1281 set_exception_intercept(svm, GP_VECTOR); in init_vmcb()
1283 svm_set_intercept(svm, INTERCEPT_INTR); in init_vmcb()
1284 svm_set_intercept(svm, INTERCEPT_NMI); in init_vmcb()
1287 svm_set_intercept(svm, INTERCEPT_SMI); in init_vmcb()
1289 svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0); in init_vmcb()
1290 svm_set_intercept(svm, INTERCEPT_RDPMC); in init_vmcb()
1291 svm_set_intercept(svm, INTERCEPT_CPUID); in init_vmcb()
1292 svm_set_intercept(svm, INTERCEPT_INVD); in init_vmcb()
1293 svm_set_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1294 svm_set_intercept(svm, INTERCEPT_INVLPGA); in init_vmcb()
1295 svm_set_intercept(svm, INTERCEPT_IOIO_PROT); in init_vmcb()
1296 svm_set_intercept(svm, INTERCEPT_MSR_PROT); in init_vmcb()
1297 svm_set_intercept(svm, INTERCEPT_TASK_SWITCH); in init_vmcb()
1298 svm_set_intercept(svm, INTERCEPT_SHUTDOWN); in init_vmcb()
1299 svm_set_intercept(svm, INTERCEPT_VMRUN); in init_vmcb()
1300 svm_set_intercept(svm, INTERCEPT_VMMCALL); in init_vmcb()
1301 svm_set_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb()
1302 svm_set_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb()
1303 svm_set_intercept(svm, INTERCEPT_STGI); in init_vmcb()
1304 svm_set_intercept(svm, INTERCEPT_CLGI); in init_vmcb()
1305 svm_set_intercept(svm, INTERCEPT_SKINIT); in init_vmcb()
1306 svm_set_intercept(svm, INTERCEPT_WBINVD); in init_vmcb()
1307 svm_set_intercept(svm, INTERCEPT_XSETBV); in init_vmcb()
1308 svm_set_intercept(svm, INTERCEPT_RDPRU); in init_vmcb()
1309 svm_set_intercept(svm, INTERCEPT_RSM); in init_vmcb()
1312 svm_set_intercept(svm, INTERCEPT_MONITOR); in init_vmcb()
1313 svm_set_intercept(svm, INTERCEPT_MWAIT); in init_vmcb()
1317 svm_set_intercept(svm, INTERCEPT_HLT); in init_vmcb()
1320 control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); in init_vmcb()
1347 svm_clr_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1348 clr_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1349 svm_clr_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1350 svm_clr_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1354 svm->current_vmcb->asid_generation = 0; in init_vmcb()
1355 svm->asid = 0; in init_vmcb()
1357 svm->nested.vmcb12_gpa = INVALID_GPA; in init_vmcb()
1358 svm->nested.last_vmcb12_gpa = INVALID_GPA; in init_vmcb()
1364 svm_set_intercept(svm, INTERCEPT_PAUSE); in init_vmcb()
1366 svm_clr_intercept(svm, INTERCEPT_PAUSE); in init_vmcb()
1369 svm_recalc_instruction_intercepts(vcpu, svm); in init_vmcb()
1376 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); in init_vmcb()
1379 avic_init_vmcb(svm, vmcb); in init_vmcb()
1382 svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK; in init_vmcb()
1385 svm_clr_intercept(svm, INTERCEPT_STGI); in init_vmcb()
1386 svm_clr_intercept(svm, INTERCEPT_CLGI); in init_vmcb()
1387 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; in init_vmcb()
1391 sev_init_vmcb(svm); in init_vmcb()
1398 enable_gif(svm); in init_vmcb()
1403 struct vcpu_svm *svm = to_svm(vcpu); in __svm_vcpu_reset() local
1405 svm_vcpu_init_msrpm(vcpu, svm->msrpm); in __svm_vcpu_reset()
1409 svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio; in __svm_vcpu_reset()
1411 svm->nmi_masked = false; in __svm_vcpu_reset()
1412 svm->awaiting_iret_completion = false; in __svm_vcpu_reset()
1415 sev_es_vcpu_reset(svm); in __svm_vcpu_reset()
1420 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_reset() local
1422 svm->spec_ctrl = 0; in svm_vcpu_reset()
1423 svm->virt_spec_ctrl = 0; in svm_vcpu_reset()
1431 void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb) in svm_switch_vmcb() argument
1433 svm->current_vmcb = target_vmcb; in svm_switch_vmcb()
1434 svm->vmcb = target_vmcb->ptr; in svm_switch_vmcb()
1439 struct vcpu_svm *svm; in svm_vcpu_create() local
1445 svm = to_svm(vcpu); in svm_vcpu_create()
1470 err = avic_init_vcpu(svm); in svm_vcpu_create()
1474 svm->msrpm = svm_vcpu_alloc_msrpm(); in svm_vcpu_create()
1475 if (!svm->msrpm) { in svm_vcpu_create()
1480 svm->x2avic_msrs_intercepted = true; in svm_vcpu_create()
1482 svm->vmcb01.ptr = page_address(vmcb01_page); in svm_vcpu_create()
1483 svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); in svm_vcpu_create()
1484 svm_switch_vmcb(svm, &svm->vmcb01); in svm_vcpu_create()
1487 svm->sev_es.vmsa = page_address(vmsa_page); in svm_vcpu_create()
1489 svm->guest_state_loaded = false; in svm_vcpu_create()
1512 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_free() local
1519 svm_clear_current_vmcb(svm->vmcb); in svm_vcpu_free()
1522 svm_free_nested(svm); in svm_vcpu_free()
1526 __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT)); in svm_vcpu_free()
1527 __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE)); in svm_vcpu_free()
1532 struct vcpu_svm *svm = to_svm(vcpu); in svm_prepare_switch_to_guest() local
1536 sev_es_unmap_ghcb(svm); in svm_prepare_switch_to_guest()
1538 if (svm->guest_state_loaded) in svm_prepare_switch_to_guest()
1564 kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull); in svm_prepare_switch_to_guest()
1566 svm->guest_state_loaded = true; in svm_prepare_switch_to_guest()
1576 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_load() local
1579 if (sd->current_vmcb != svm->vmcb) { in svm_vcpu_load()
1580 sd->current_vmcb = svm->vmcb; in svm_vcpu_load()
1601 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_rflags() local
1602 unsigned long rflags = svm->vmcb->save.rflags; in svm_get_rflags()
1604 if (svm->nmi_singlestep) { in svm_get_rflags()
1606 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in svm_get_rflags()
1608 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in svm_get_rflags()
1654 static void svm_set_vintr(struct vcpu_svm *svm) in svm_set_vintr() argument
1661 WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu)); in svm_set_vintr()
1663 svm_set_intercept(svm, INTERCEPT_VINTR); in svm_set_vintr()
1672 if (!svm_is_intercept(svm, INTERCEPT_VINTR)) in svm_set_vintr()
1679 control = &svm->vmcb->control; in svm_set_vintr()
1684 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_set_vintr()
1687 static void svm_clear_vintr(struct vcpu_svm *svm) in svm_clear_vintr() argument
1689 svm_clr_intercept(svm, INTERCEPT_VINTR); in svm_clear_vintr()
1692 svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; in svm_clear_vintr()
1693 if (is_guest_mode(&svm->vcpu)) { in svm_clear_vintr()
1694 svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; in svm_clear_vintr()
1696 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != in svm_clear_vintr()
1697 (svm->nested.ctl.int_ctl & V_TPR_MASK)); in svm_clear_vintr()
1699 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & in svm_clear_vintr()
1702 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; in svm_clear_vintr()
1705 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_clear_vintr()
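
Note: svm_set_vintr()/svm_clear_vintr() bracket the virtual-interrupt window: set programs V_IRQ (with a dummy vector/priority) to force an interrupt-window exit; clear strips the V_IRQ injection bits and, for a nested guest, restores L1's int_ctl/int_vector from the cached nested control. A sketch of the clear side's bit surgery (the injection mask is passed in rather than hard-coded, since its exact composition is not shown here):

  #include <stdbool.h>
  #include <stdint.h>

  /* Clear the V_IRQ injection bits, then (when running nested) merge
   * back the bits L1 had requested so L2 keeps seeing L1's virtual
   * interrupt state. */
  static uint32_t clear_vintr_sketch(uint32_t int_ctl, uint32_t inj_mask,
                                     uint32_t l1_int_ctl, bool nested)
  {
          int_ctl &= ~inj_mask;
          if (nested)
                  int_ctl |= l1_int_ctl & inj_mask;
          return int_ctl;
  }
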
1821 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_idt() local
1823 dt->size = svm->vmcb->save.idtr.limit; in svm_get_idt()
1824 dt->address = svm->vmcb->save.idtr.base; in svm_get_idt()
1829 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_idt() local
1831 svm->vmcb->save.idtr.limit = dt->size; in svm_set_idt()
1832 svm->vmcb->save.idtr.base = dt->address; in svm_set_idt()
1833 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in svm_set_idt()
1838 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_gdt() local
1840 dt->size = svm->vmcb->save.gdtr.limit; in svm_get_gdt()
1841 dt->address = svm->vmcb->save.gdtr.base; in svm_get_gdt()
1846 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_gdt() local
1848 svm->vmcb->save.gdtr.limit = dt->size; in svm_set_gdt()
1849 svm->vmcb->save.gdtr.base = dt->address; in svm_set_gdt()
1850 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in svm_set_gdt()
1855 struct vcpu_svm *svm = to_svm(vcpu); in sev_post_set_cr3() local
1866 svm->vmcb->save.cr3 = cr3; in sev_post_set_cr3()
1867 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in sev_post_set_cr3()
1878 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr0() local
1887 svm->vmcb->save.efer |= EFER_LMA | EFER_LME; in svm_set_cr0()
1893 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); in svm_set_cr0()
1913 svm->vmcb->save.cr0 = hcr0; in svm_set_cr0()
1914 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_cr0()
1925 svm_clr_intercept(svm, INTERCEPT_CR0_READ); in svm_set_cr0()
1926 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); in svm_set_cr0()
1928 svm_set_intercept(svm, INTERCEPT_CR0_READ); in svm_set_cr0()
1929 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); in svm_set_cr0()
1964 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_segment() local
1987 svm->vmcb->save.cpl = (var->dpl & 3); in svm_set_segment()
1989 vmcb_mark_dirty(svm->vmcb, VMCB_SEG); in svm_set_segment()
1994 struct vcpu_svm *svm = to_svm(vcpu); in svm_update_exception_bitmap() local
1996 clr_exception_intercept(svm, BP_VECTOR); in svm_update_exception_bitmap()
2000 set_exception_intercept(svm, BP_VECTOR); in svm_update_exception_bitmap()
2004 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) in new_asid() argument
2009 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; in new_asid()
2010 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in new_asid()
2013 svm->current_vmcb->asid_generation = sd->asid_generation; in new_asid()
2014 svm->asid = sd->next_asid++; in new_asid()
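
Note: new_asid() is a classic generation-counted allocator: exhausting the range starts a new generation (with a full TLB flush, the TLB_CONTROL_FLUSH_ALL_ASID line above) and restarts from the bottom. Standalone sketch of the same flow:

  struct asid_pool {
          unsigned int next, min, max, generation;
  };

  /* Hand out the next ASID; on wrap, bump the generation so every
   * vCPU's cached generation mismatches and forces reallocation
   * (cf. the asid_generation comparison in pre_svm_run below). */
  static unsigned int new_asid_sketch(struct asid_pool *p,
                                      unsigned int *vcpu_generation)
  {
          if (p->next > p->max) {
                  ++p->generation;
                  p->next = p->min;
                  /* a full TLB flush must accompany the generation bump */
          }
          *vcpu_generation = p->generation;
          return p->next++;
  }
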
2017 static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value) in svm_set_dr6() argument
2019 struct vmcb *vmcb = svm->vmcb; in svm_set_dr6()
2021 if (svm->vcpu.arch.guest_state_protected) in svm_set_dr6()
2032 struct vcpu_svm *svm = to_svm(vcpu); in svm_sync_dirty_debug_regs() local
2045 vcpu->arch.dr6 = svm->vmcb->save.dr6; in svm_sync_dirty_debug_regs()
2046 vcpu->arch.dr7 = svm->vmcb->save.dr7; in svm_sync_dirty_debug_regs()
2048 set_dr_intercepts(svm); in svm_sync_dirty_debug_regs()
2053 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr7() local
2058 svm->vmcb->save.dr7 = value; in svm_set_dr7()
2059 vmcb_mark_dirty(svm->vmcb, VMCB_DR); in svm_set_dr7()
2064 struct vcpu_svm *svm = to_svm(vcpu); in pf_interception() local
2066 u64 fault_address = svm->vmcb->control.exit_info_2; in pf_interception()
2067 u64 error_code = svm->vmcb->control.exit_info_1; in pf_interception()
2071 svm->vmcb->control.insn_bytes : NULL, in pf_interception()
2072 svm->vmcb->control.insn_len); in pf_interception()
2077 struct vcpu_svm *svm = to_svm(vcpu); in npf_interception() local
2079 u64 fault_address = svm->vmcb->control.exit_info_2; in npf_interception()
2080 u64 error_code = svm->vmcb->control.exit_info_1; in npf_interception()
2085 svm->vmcb->control.insn_bytes : NULL, in npf_interception()
2086 svm->vmcb->control.insn_len); in npf_interception()
2092 struct vcpu_svm *svm = to_svm(vcpu); in db_interception() local
2096 !svm->nmi_singlestep) { in db_interception()
2097 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; in db_interception()
2102 if (svm->nmi_singlestep) { in db_interception()
2103 disable_nmi_singlestep(svm); in db_interception()
2111 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; in db_interception()
2112 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; in db_interception()
2114 svm->vmcb->save.cs.base + svm->vmcb->save.rip; in db_interception()
2124 struct vcpu_svm *svm = to_svm(vcpu); in bp_interception() local
2128 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; in bp_interception()
2212 struct vcpu_svm *svm = to_svm(vcpu); in shutdown_interception() local
2229 clear_page(svm->vmcb); in shutdown_interception()
2238 struct vcpu_svm *svm = to_svm(vcpu); in io_interception() local
2239 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ in io_interception()
2251 return sev_es_string_io(svm, size, port, in); in io_interception()
2256 svm->next_rip = svm->vmcb->control.exit_info_2; in io_interception()
2279 struct vcpu_svm *svm = to_svm(vcpu); in vmload_vmsave_interception() local
2287 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); in vmload_vmsave_interception()
2299 svm_copy_vmloadsave_state(svm->vmcb, vmcb12); in vmload_vmsave_interception()
2300 svm->sysenter_eip_hi = 0; in vmload_vmsave_interception()
2301 svm->sysenter_esp_hi = 0; in vmload_vmsave_interception()
2303 svm_copy_vmloadsave_state(vmcb12, svm->vmcb); in vmload_vmsave_interception()
2370 struct vcpu_svm *svm = to_svm(vcpu); in emulate_svm_instr() local
2375 ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]); in emulate_svm_instr()
2393 struct vcpu_svm *svm = to_svm(vcpu); in gp_interception() local
2394 u32 error_code = svm->vmcb->control.exit_info_1; in gp_interception()
2420 if (svm->vmcb->save.rax & ~PAGE_MASK) in gp_interception()
2431 void svm_set_gif(struct vcpu_svm *svm, bool value) in svm_set_gif() argument
2441 svm_clr_intercept(svm, INTERCEPT_STGI); in svm_set_gif()
2442 if (svm_is_intercept(svm, INTERCEPT_VINTR)) in svm_set_gif()
2443 svm_clear_vintr(svm); in svm_set_gif()
2445 enable_gif(svm); in svm_set_gif()
2446 if (svm->vcpu.arch.smi_pending || in svm_set_gif()
2447 svm->vcpu.arch.nmi_pending || in svm_set_gif()
2448 kvm_cpu_has_injectable_intr(&svm->vcpu) || in svm_set_gif()
2449 kvm_apic_has_pending_init_or_sipi(&svm->vcpu)) in svm_set_gif()
2450 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_set_gif()
2452 disable_gif(svm); in svm_set_gif()
2460 svm_clear_vintr(svm); in svm_set_gif()
2515 struct vcpu_svm *svm = to_svm(vcpu); in task_switch_interception() local
2518 int int_type = svm->vmcb->control.exit_int_info & in task_switch_interception()
2520 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; in task_switch_interception()
2522 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; in task_switch_interception()
2524 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; in task_switch_interception()
2528 tss_selector = (u16)svm->vmcb->control.exit_info_1; in task_switch_interception()
2530 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2533 else if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2547 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2551 (u32)svm->vmcb->control.exit_info_2; in task_switch_interception()
2579 static void svm_clr_iret_intercept(struct vcpu_svm *svm) in svm_clr_iret_intercept() argument
2581 if (!sev_es_guest(svm->vcpu.kvm)) in svm_clr_iret_intercept()
2582 svm_clr_intercept(svm, INTERCEPT_IRET); in svm_clr_iret_intercept()
2585 static void svm_set_iret_intercept(struct vcpu_svm *svm) in svm_set_iret_intercept() argument
2587 if (!sev_es_guest(svm->vcpu.kvm)) in svm_set_iret_intercept()
2588 svm_set_intercept(svm, INTERCEPT_IRET); in svm_set_iret_intercept()
2593 struct vcpu_svm *svm = to_svm(vcpu); in iret_interception() local
2598 svm->awaiting_iret_completion = true; in iret_interception()
2600 svm_clr_iret_intercept(svm); in iret_interception()
2601 svm->nmi_iret_rip = kvm_rip_read(vcpu); in iret_interception()
2629 struct vcpu_svm *svm = to_svm(vcpu); in check_selective_cr0_intercepted() local
2634 (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) in check_selective_cr0_intercepted()
2641 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; in check_selective_cr0_intercepted()
2642 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); in check_selective_cr0_intercepted()
2652 struct vcpu_svm *svm = to_svm(vcpu); in cr_interception() local
2660 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) in cr_interception()
2663 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in cr_interception()
2664 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) in cr_interception()
2667 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; in cr_interception()
2726 struct vcpu_svm *svm = to_svm(vcpu); in cr_trap() local
2731 new_value = (unsigned long)svm->vmcb->control.exit_info_1; in cr_trap()
2733 cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP; in cr_trap()
2761 struct vcpu_svm *svm = to_svm(vcpu); in dr_interception() local
2779 clr_dr_intercepts(svm); in dr_interception()
2787 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in dr_interception()
2788 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; in dr_interception()
2853 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_msr() local
2860 msr_info->data = svm->tsc_ratio_msr; in svm_get_msr()
2863 msr_info->data = svm->vmcb01.ptr->save.star; in svm_get_msr()
2867 msr_info->data = svm->vmcb01.ptr->save.lstar; in svm_get_msr()
2870 msr_info->data = svm->vmcb01.ptr->save.cstar; in svm_get_msr()
2873 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; in svm_get_msr()
2876 msr_info->data = svm->vmcb01.ptr->save.sfmask; in svm_get_msr()
2880 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs; in svm_get_msr()
2883 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip; in svm_get_msr()
2885 msr_info->data |= (u64)svm->sysenter_eip_hi << 32; in svm_get_msr()
2888 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp; in svm_get_msr()
2890 msr_info->data |= (u64)svm->sysenter_esp_hi << 32; in svm_get_msr()
2893 msr_info->data = svm->tsc_aux; in svm_get_msr()
2896 msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl; in svm_get_msr()
2899 msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from; in svm_get_msr()
2902 msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to; in svm_get_msr()
2905 msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from; in svm_get_msr()
2908 msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to; in svm_get_msr()
2911 msr_info->data = svm->nested.hsave_msr; in svm_get_msr()
2914 msr_info->data = svm->nested.vm_cr_msr; in svm_get_msr()
2922 msr_info->data = svm->vmcb->save.spec_ctrl; in svm_get_msr()
2924 msr_info->data = svm->spec_ctrl; in svm_get_msr()
2931 msr_info->data = svm->virt_spec_ctrl; in svm_get_msr()
2951 msr_info->data = svm->msr_decfg; in svm_get_msr()
2961 struct vcpu_svm *svm = to_svm(vcpu); in svm_complete_emulated_msr() local
2962 if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb)) in svm_complete_emulated_msr()
2965 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1); in svm_complete_emulated_msr()
2966 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, in svm_complete_emulated_msr()
2975 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_vm_cr() local
2983 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) in svm_set_vm_cr()
2986 svm->nested.vm_cr_msr &= ~chg_mask; in svm_set_vm_cr()
2987 svm->nested.vm_cr_msr |= (data & chg_mask); in svm_set_vm_cr()
2989 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; in svm_set_vm_cr()
3000 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_msr() local
3020 if (data != 0 && data != svm->tsc_ratio_msr) in svm_set_msr()
3028 svm->tsc_ratio_msr = data; in svm_set_msr()
3040 svm->vmcb01.ptr->save.g_pat = data; in svm_set_msr()
3042 nested_vmcb02_compute_g_pat(svm); in svm_set_msr()
3043 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); in svm_set_msr()
3054 svm->vmcb->save.spec_ctrl = data; in svm_set_msr()
3056 svm->spec_ctrl = data; in svm_set_msr()
3071 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); in svm_set_msr()
3081 svm->virt_spec_ctrl = data; in svm_set_msr()
3084 svm->vmcb01.ptr->save.star = data; in svm_set_msr()
3088 svm->vmcb01.ptr->save.lstar = data; in svm_set_msr()
3091 svm->vmcb01.ptr->save.cstar = data; in svm_set_msr()
3094 svm->vmcb01.ptr->save.kernel_gs_base = data; in svm_set_msr()
3097 svm->vmcb01.ptr->save.sfmask = data; in svm_set_msr()
3101 svm->vmcb01.ptr->save.sysenter_cs = data; in svm_set_msr()
3104 svm->vmcb01.ptr->save.sysenter_eip = (u32)data; in svm_set_msr()
3112 svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; in svm_set_msr()
3115 svm->vmcb01.ptr->save.sysenter_esp = (u32)data; in svm_set_msr()
3116 svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; in svm_set_msr()
3140 svm->tsc_aux = data; in svm_set_msr()
3150 svm_get_lbr_vmcb(svm)->save.dbgctl = data; in svm_set_msr()
3163 svm->nested.hsave_msr = data & PAGE_MASK; in svm_set_msr()
3185 svm->msr_decfg = data; in svm_set_msr()
3243 struct vcpu_svm *svm = to_svm(vcpu); in invpcid_interception() local
3257 type = svm->vmcb->control.exit_info_2; in invpcid_interception()
3258 gva = svm->vmcb->control.exit_info_1; in invpcid_interception()
3339 struct vcpu_svm *svm = to_svm(vcpu); in dump_vmcb() local
3340 struct vmcb_control_area *control = &svm->vmcb->control; in dump_vmcb()
3341 struct vmcb_save_area *save = &svm->vmcb->save; in dump_vmcb()
3342 struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save; in dump_vmcb()
3350 svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu); in dump_vmcb()
3517 struct vcpu_svm *svm = to_svm(vcpu); in svm_handle_exit() local
3519 u32 exit_code = svm->vmcb->control.exit_code; in svm_handle_exit()
3523 if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE)) in svm_handle_exit()
3524 vcpu->arch.cr0 = svm->vmcb->save.cr0; in svm_handle_exit()
3526 vcpu->arch.cr3 = svm->vmcb->save.cr3; in svm_handle_exit()
3534 vmexit = nested_svm_exit_special(svm); in svm_handle_exit()
3537 vmexit = nested_svm_exit_handled(svm); in svm_handle_exit()
3543 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { in svm_handle_exit()
3546 = svm->vmcb->control.exit_code; in svm_handle_exit()
3561 struct vcpu_svm *svm = to_svm(vcpu); in pre_svm_run() local
3568 if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) { in pre_svm_run()
3569 svm->current_vmcb->asid_generation = 0; in pre_svm_run()
3570 vmcb_mark_all_dirty(svm->vmcb); in pre_svm_run()
3571 svm->current_vmcb->cpu = vcpu->cpu; in pre_svm_run()
3575 return pre_sev_run(svm, vcpu->cpu); in pre_svm_run()
3578 if (svm->current_vmcb->asid_generation != sd->asid_generation) in pre_svm_run()
3579 new_asid(svm, sd); in pre_svm_run()
3584 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_nmi() local
3586 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; in svm_inject_nmi()
3588 if (svm->nmi_l1_to_l2) in svm_inject_nmi()
3591 svm->nmi_masked = true; in svm_inject_nmi()
3592 svm_set_iret_intercept(svm); in svm_inject_nmi()
3598 struct vcpu_svm *svm = to_svm(vcpu); in svm_is_vnmi_pending() local
3600 if (!is_vnmi_enabled(svm)) in svm_is_vnmi_pending()
3603 return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK); in svm_is_vnmi_pending()
3608 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_vnmi_pending() local
3610 if (!is_vnmi_enabled(svm)) in svm_set_vnmi_pending()
3613 if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK) in svm_set_vnmi_pending()
3616 svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK; in svm_set_vnmi_pending()
3617 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_set_vnmi_pending()
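
Note: with vNMI, pending NMIs are latched as a single bit in int_ctl, so svm_set_vnmi_pending() refuses to stack a second one; KVM keeps an overflow NMI queued in software (vcpu->arch.nmi_pending) instead. Sketch of the latch:

  #include <stdbool.h>
  #include <stdint.h>

  /* Returns true if the NMI was latched into int_ctl, false if one is
   * already pending and the caller must keep it queued in software. */
  static bool vnmi_latch_sketch(uint32_t *int_ctl, uint32_t pending_mask)
  {
          if (*int_ctl & pending_mask)
                  return false;
          *int_ctl |= pending_mask;
          return true;
  }
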
3631 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_irq() local
3647 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | in svm_inject_irq()
3703 struct vcpu_svm *svm = to_svm(vcpu); in svm_update_cr8_intercept() local
3715 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); in svm_update_cr8_intercept()
3721 svm_set_intercept(svm, INTERCEPT_CR8_WRITE); in svm_update_cr8_intercept()
3726 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nmi_mask() local
3728 if (is_vnmi_enabled(svm)) in svm_get_nmi_mask()
3729 return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK; in svm_get_nmi_mask()
3731 return svm->nmi_masked; in svm_get_nmi_mask()
3736 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nmi_mask() local
3738 if (is_vnmi_enabled(svm)) { in svm_set_nmi_mask()
3740 svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK; in svm_set_nmi_mask()
3742 svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK; in svm_set_nmi_mask()
3745 svm->nmi_masked = masked; in svm_set_nmi_mask()
3747 svm_set_iret_intercept(svm); in svm_set_nmi_mask()
3749 svm_clr_iret_intercept(svm); in svm_set_nmi_mask()
3755 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_blocked() local
3756 struct vmcb *vmcb = svm->vmcb; in svm_nmi_blocked()
3758 if (!gif_set(svm)) in svm_nmi_blocked()
3761 if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) in svm_nmi_blocked()
3772 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_allowed() local
3773 if (svm->nested.nested_run_pending) in svm_nmi_allowed()
3780 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) in svm_nmi_allowed()
3787 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_blocked() local
3788 struct vmcb *vmcb = svm->vmcb; in svm_interrupt_blocked()
3790 if (!gif_set(svm)) in svm_interrupt_blocked()
3795 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) in svm_interrupt_blocked()
3796 ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) in svm_interrupt_blocked()
3801 if (nested_exit_on_intr(svm)) in svm_interrupt_blocked()
3813 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_allowed() local
3815 if (svm->nested.nested_run_pending) in svm_interrupt_allowed()
3825 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm)) in svm_interrupt_allowed()
3833 struct vcpu_svm *svm = to_svm(vcpu); in svm_enable_irq_window() local
3843 if (vgif || gif_set(svm)) { in svm_enable_irq_window()
3857 svm_set_vintr(svm); in svm_enable_irq_window()
3863 struct vcpu_svm *svm = to_svm(vcpu); in svm_enable_nmi_window() local
3882 WARN_ON_ONCE(is_vnmi_enabled(svm)); in svm_enable_nmi_window()
3884 if (!svm->awaiting_iret_completion) in svm_enable_nmi_window()
3901 if (!gif_set(svm)) { in svm_enable_nmi_window()
3903 svm_set_intercept(svm, INTERCEPT_STGI); in svm_enable_nmi_window()
3911 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); in svm_enable_nmi_window()
3912 svm->nmi_singlestep = true; in svm_enable_nmi_window()
3913 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); in svm_enable_nmi_window()
3918 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb_asid() local
3935 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in svm_flush_tlb_asid()
3937 svm->current_vmcb->asid_generation--; in svm_flush_tlb_asid()
3973 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb_gva() local
3975 invlpga(gva, svm->vmcb->control.asid); in svm_flush_tlb_gva()
3980 struct vcpu_svm *svm = to_svm(vcpu); in sync_cr8_to_lapic() local
3985 if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) { in sync_cr8_to_lapic()
3986 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; in sync_cr8_to_lapic()
3993 struct vcpu_svm *svm = to_svm(vcpu); in sync_lapic_to_cr8() local
4001 svm->vmcb->control.int_ctl &= ~V_TPR_MASK; in sync_lapic_to_cr8()
4002 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; in sync_lapic_to_cr8()
4010 struct vcpu_svm *svm = to_svm(vcpu); in svm_complete_soft_interrupt() local
4022 kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase)) in svm_complete_soft_interrupt()
4023 svm->vmcb->control.next_rip = svm->soft_int_next_rip; in svm_complete_soft_interrupt()
4033 kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase)) in svm_complete_soft_interrupt()
4034 kvm_rip_write(vcpu, svm->soft_int_old_rip); in svm_complete_soft_interrupt()
4039 struct vcpu_svm *svm = to_svm(vcpu); in svm_complete_interrupts() local
4042 u32 exitintinfo = svm->vmcb->control.exit_int_info; in svm_complete_interrupts()
4043 bool nmi_l1_to_l2 = svm->nmi_l1_to_l2; in svm_complete_interrupts()
4044 bool soft_int_injected = svm->soft_int_injected; in svm_complete_interrupts()
4046 svm->nmi_l1_to_l2 = false; in svm_complete_interrupts()
4047 svm->soft_int_injected = false; in svm_complete_interrupts()
4053 if (svm->awaiting_iret_completion && in svm_complete_interrupts()
4054 kvm_rip_read(vcpu) != svm->nmi_iret_rip) { in svm_complete_interrupts()
4055 svm->awaiting_iret_completion = false; in svm_complete_interrupts()
4056 svm->nmi_masked = false; in svm_complete_interrupts()
4078 svm->nmi_l1_to_l2 = nmi_l1_to_l2; in svm_complete_interrupts()
4088 u32 err = svm->vmcb->control.exit_int_info_err; in svm_complete_interrupts()
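
Note: svm_complete_interrupts() re-queues whatever event was in flight at #VMEXIT by decoding exit_int_info, whose layout matches the architectural EVENTINJ format (vector in bits 7:0, type in bits 10:8, valid in bit 31). A standalone decoder for the fields it consumes:

  #include <stdint.h>

  #define EXITINTINFO_VEC_MASK   0xffU
  #define EXITINTINFO_TYPE_SHIFT 8
  #define EXITINTINFO_TYPE_MASK  (0x7U << EXITINTINFO_TYPE_SHIFT)
  #define EXITINTINFO_VALID      (1U << 31)

  /* Returns 1 and fills vector/type if the record is valid, 0 if no
   * event was being delivered when the exit happened. */
  static int decode_exitintinfo(uint32_t info, uint8_t *vector, uint8_t *type)
  {
          if (!(info & EXITINTINFO_VALID))
                  return 0;
          *vector = info & EXITINTINFO_VEC_MASK;
          *type   = (info & EXITINTINFO_TYPE_MASK) >> EXITINTINFO_TYPE_SHIFT;
          return 1;
  }
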
4108 struct vcpu_svm *svm = to_svm(vcpu); in svm_cancel_injection() local
4109 struct vmcb_control_area *control = &svm->vmcb->control; in svm_cancel_injection()
4133 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_enter_exit() local
4140 __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted); in svm_vcpu_enter_exit()
4142 __svm_vcpu_run(svm, spec_ctrl_intercepted); in svm_vcpu_enter_exit()
4149 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_run() local
4154 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_vcpu_run()
4155 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_vcpu_run()
4156 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_vcpu_run()
4164 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { in svm_vcpu_run()
4170 disable_nmi_singlestep(svm); in svm_vcpu_run()
4178 if (unlikely(svm->asid != svm->vmcb->control.asid)) { in svm_vcpu_run()
4179 svm->vmcb->control.asid = svm->asid; in svm_vcpu_run()
4180 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in svm_vcpu_run()
4182 svm->vmcb->save.cr2 = vcpu->arch.cr2; in svm_vcpu_run()
4184 svm_hv_update_vp_id(svm->vmcb, vcpu); in svm_vcpu_run()
4191 svm_set_dr6(svm, vcpu->arch.dr6); in svm_vcpu_run()
4193 svm_set_dr6(svm, DR6_ACTIVE_LOW); in svm_vcpu_run()
4207 x86_spec_ctrl_set_guest(svm->virt_spec_ctrl); in svm_vcpu_run()
4212 x86_spec_ctrl_restore_host(svm->virt_spec_ctrl); in svm_vcpu_run()
4215 vcpu->arch.cr2 = svm->vmcb->save.cr2; in svm_vcpu_run()
4216 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; in svm_vcpu_run()
4217 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; in svm_vcpu_run()
4218 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; in svm_vcpu_run()
4222 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
4230 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
4235 svm->next_rip = 0; in svm_vcpu_run()
4237 nested_sync_control_from_vmcb02(svm); in svm_vcpu_run()
4240 if (svm->nested.nested_run_pending && in svm_vcpu_run()
4241 svm->vmcb->control.exit_code != SVM_EXIT_ERR) in svm_vcpu_run()
4244 svm->nested.nested_run_pending = 0; in svm_vcpu_run()
4247 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; in svm_vcpu_run()
4248 vmcb_mark_all_clean(svm->vmcb); in svm_vcpu_run()
4251 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) in svm_vcpu_run()
4261 if (unlikely(svm->vmcb->control.exit_code == in svm_vcpu_run()
4278 struct vcpu_svm *svm = to_svm(vcpu); in svm_load_mmu_pgd() local
4282 svm->vmcb->control.nested_cr3 = __sme_set(root_hpa); in svm_load_mmu_pgd()
4283 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); in svm_load_mmu_pgd()
4296 svm->vmcb->save.cr3 = cr3; in svm_load_mmu_pgd()
4297 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_load_mmu_pgd()
4337 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_after_set_cpuid() local
4371 svm_recalc_instruction_intercepts(vcpu, svm); in svm_vcpu_after_set_cpuid()
4374 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, in svm_vcpu_after_set_cpuid()
4378 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0, in svm_vcpu_after_set_cpuid()
4382 sev_vcpu_after_set_cpuid(svm); in svm_vcpu_after_set_cpuid()
4461 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_intercept() local
4464 struct vmcb *vmcb = svm->vmcb; in svm_check_intercept()
4489 if (!(vmcb12_is_intercept(&svm->nested.ctl, in svm_check_intercept()
4567 vmexit = nested_svm_exit_handled(svm); in svm_check_intercept()
4597 struct vcpu_svm *svm = to_svm(vcpu); in svm_smi_blocked() local
4600 if (!gif_set(svm)) in svm_smi_blocked()
4608 struct vcpu_svm *svm = to_svm(vcpu); in svm_smi_allowed() local
4609 if (svm->nested.nested_run_pending) in svm_smi_allowed()
4616 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm)) in svm_smi_allowed()
4624 struct vcpu_svm *svm = to_svm(vcpu); in svm_enter_smm() local
4640 smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa; in svm_enter_smm()
4642 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_enter_smm()
4643 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_enter_smm()
4644 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_enter_smm()
4646 ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW); in svm_enter_smm()
4662 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save)) in svm_enter_smm()
4668 &svm->vmcb01.ptr->save); in svm_enter_smm()
4676 struct vcpu_svm *svm = to_svm(vcpu); in svm_leave_smm() local
4700 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save)) in svm_leave_smm()
4703 if (svm_allocate_nested(svm)) in svm_leave_smm()
4711 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400); in svm_leave_smm()
4717 vmcb_mark_all_dirty(svm->vmcb01.ptr); in svm_leave_smm()
4720 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); in svm_leave_smm()
4721 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); in svm_leave_smm()
4727 svm->nested.nested_run_pending = 1; in svm_leave_smm()
4738 struct vcpu_svm *svm = to_svm(vcpu); in svm_enable_smi_window() local
4740 if (!gif_set(svm)) { in svm_enable_smi_window()
4742 svm_set_intercept(svm, INTERCEPT_STGI); in svm_enable_smi_window()
4896 struct vcpu_svm *svm = to_svm(vcpu); in svm_apic_init_signal_blocked() local
4898 return !gif_set(svm); in svm_apic_init_signal_blocked()