/openbmc/linux/tools/testing/selftests/kvm/x86_64/

hyperv_svm_test.c
  in guest_code():
     74: struct vmcb *vmcb = svm->vmcb;
     75: struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
     97: run_guest(vmcb, svm->vmcb_gpa);
     98: GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
    100: vmcb->save.rip += 3;
    103: vmcb->control.intercept |= 1ULL << INTERCEPT_MSR_PROT;
    105: run_guest(vmcb, svm->vmcb_gpa);
    106: GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
    107: vmcb->save.rip += 2; /* rdmsr */
    111: run_guest(vmcb, svm->vmcb_gpa);
  [all …]
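
The guest_code() excerpt above is the standard selftest loop: run L2, assert the expected exit code, advance the guest RIP in the VMCB past the exiting instruction, and re-enter. A minimal sketch of that loop, assuming the KVM selftest SVM helpers from svm_util.h and an L2 that executes a 3-byte vmmcall followed by a 2-byte rdmsr:

```c
#include "svm_util.h"	/* struct vmcb, run_guest(), GUEST_ASSERT(), SVM_EXIT_* */

/* Hypothetical L1 body: the first VMRUN exits on vmmcall; enabling the
 * MSR-protection intercept makes the following rdmsr in L2 exit too. */
static void l1_run_and_skip_sketch(struct svm_test_data *svm)
{
	struct vmcb *vmcb = svm->vmcb;

	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	vmcb->save.rip += 3;				/* skip 3-byte vmmcall */

	vmcb->control.intercept |= 1ULL << INTERCEPT_MSR_PROT;
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
	vmcb->save.rip += 2;				/* skip 2-byte rdmsr */
	run_guest(vmcb, svm->vmcb_gpa);
}
```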

svm_nested_soft_inject_test.c
  in l1_guest_code():
     83: struct vmcb *vmcb = svm->vmcb;
     93: vmcb->control.intercept_exceptions |= BIT(PF_VECTOR) | BIT(UD_VECTOR);
     94: vmcb->control.intercept |= BIT(INTERCEPT_NMI) | BIT(INTERCEPT_HLT);
     97: vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
     99: vmcb->control.event_inj = INT_NR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_SOFT;
    101: vmcb->control.next_rip = vmcb->save.rip;
    104: run_guest(vmcb, svm->vmcb_gpa);
    105: __GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
    107:                vmcb->control.exit_code,
    108:                vmcb->control.exit_info_1, vmcb->control.exit_info_2);
  [all …]
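
The key lines here are the event_inj writes: SVM event injection delivers an exception, NMI, or software interrupt into L2 on the next VMRUN, and for soft events next_rip tells the CPU where L2 resumes afterwards. A sketch under the same selftest assumptions (INT_NR is the test's software-interrupt vector; the value used here is illustrative):

```c
#include <stdbool.h>
#include "svm_util.h"

#define INT_NR	0x20	/* illustrative soft-interrupt vector */

/* Queue an NMI or a software interrupt for delivery at the next VMRUN. */
static void inject_event_sketch(struct vmcb *vmcb, bool nmi)
{
	if (nmi) {
		vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	} else {
		vmcb->control.event_inj = INT_NR | SVM_EVTINJ_VALID |
					  SVM_EVTINJ_TYPE_SOFT;
		/* Soft events need the post-INTn resume point. */
		vmcb->control.next_rip = vmcb->save.rip;
	}
}
```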

svm_int_ctl_test.c
  in l1_guest_code():
     62: struct vmcb *vmcb = svm->vmcb;
     71: vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
     74: vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR));
     77: vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
     78: vmcb->control.int_vector = VINTR_IRQ_NUMBER;
     80: run_guest(vmcb, svm->vmcb_gpa);
     81: GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
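
int_ctl drives SVM's virtual-interrupt machinery: V_IRQ requests a virtual interrupt, V_INTR_PRIO sets its priority, and int_vector carries the vector number. A sketch of programming a virtual interrupt for L2, assuming the selftest definitions (VINTR_IRQ_NUMBER is the test's vector; 0x30 here is illustrative):

```c
#include "svm_util.h"

#define VINTR_IRQ_NUMBER	0x30	/* illustrative vector */

/* Hand L2 a virtual interrupt directly: with V_INTR_MASKING off and
 * the INTR/VINTR intercepts cleared, L2 takes the interrupt itself
 * as soon as its RFLAGS.IF permits. */
static void program_vintr_sketch(struct vmcb *vmcb)
{
	vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
	vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR));

	vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
	vmcb->control.int_vector = VINTR_IRQ_NUMBER;
}
```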

svm_nested_shutdown_test.c
  in l1_guest_code():
     24: struct vmcb *vmcb = svm->vmcb;
     29: vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));
     36: run_guest(vmcb, svm->vmcb_gpa);
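
Clearing INTERCEPT_SHUTDOWN is the point of this test: without the intercept, a shutdown (e.g. a triple fault) in L2 is not reflected to L1 as a #VMEXIT, so the shutdown takes down the whole nested guest. A sketch of the L1 side, under the same selftest assumptions as above:

```c
#include "svm_util.h"

/* Deliberately stop intercepting shutdown events before entering L2;
 * if L2 then triple-faults, the shutdown propagates past L1. */
static void l1_no_shutdown_intercept_sketch(struct svm_test_data *svm)
{
	struct vmcb *vmcb = svm->vmcb;

	vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));
	run_guest(vmcb, svm->vmcb_gpa);
	/* Not expected to return if L2 shuts down. */
}
```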

svm_vmcall_test.c
  in l1_guest_code():
     24: struct vmcb *vmcb = svm->vmcb;
     30: run_guest(vmcb, svm->vmcb_gpa);
     32: GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
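
svm_vmcall_test.c is the smallest complete nested-SVM round trip. The full guest-side pattern, sketched under the same selftest assumptions (generic_svm_setup(), vmmcall(), and GUEST_DONE() come from the selftest headers):

```c
#include "svm_util.h"
#include "processor.h"	/* vmmcall() */

#define L2_GUEST_STACK_SIZE 64

static void l2_guest_code(struct svm_test_data *svm)
{
	vmmcall();		/* single instruction: forces a #VMEXIT to L1 */
}

static void l1_guest_code_sketch(struct svm_test_data *svm)
{
	unsigned long l2_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	/* Point the VMCB at L2's entry point and stack, then enter L2. */
	generic_svm_setup(svm, l2_guest_code, &l2_stack[L2_GUEST_STACK_SIZE]);
	run_guest(vmcb, svm->vmcb_gpa);

	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_DONE();
}
```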

nested_exceptions_test.c
  in svm_run_l2():
     79: struct vmcb *vmcb = svm->vmcb;
     80: struct vmcb_control_area *ctrl = &vmcb->control;
     82: vmcb->save.rip = (u64)l2_code;
     83: run_guest(vmcb, svm->vmcb_gpa);
  in l1_svm_code():
     94: struct vmcb_control_area *ctrl = &svm->vmcb->control;
     98: svm->vmcb->save.idtr.limit = 0;

triple_fault_event_test.c
  in l1_guest_code_svm():
     45: struct vmcb *vmcb = svm->vmcb;
     51: vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN));
     53: run_guest(vmcb, svm->vmcb_gpa);

state_test.c
  in svm_l1_guest_code():
     38: struct vmcb *vmcb = svm->vmcb;
     46: run_guest(vmcb, svm->vmcb_gpa);
     47: GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
     49: vmcb->save.rip += 3;
     50: run_guest(vmcb, svm->vmcb_gpa);
     51: GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);

smm_test.c
  in guest_code():
    103: run_guest(svm->vmcb, svm->vmcb_gpa);
    104: run_guest(svm->vmcb, svm->vmcb_gpa);

/openbmc/qemu/target/i386/tcg/sysemu/

svm_helper.c
  in virtual_vm_load_save_enabled():
    140: lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
  in helper_vmrun():
    187: x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
    189: x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
    192: x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
    194: x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
    198: env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    200: env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    202: env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    204: env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    206: env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
  [all …]
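
Unlike KVM, QEMU's TCG emulation never maps the VMCB as a C struct in host memory; every field access is a guest-physical load or store at an offsetof()-computed byte offset. A sketch of the helper_vmrun() save pattern, assuming QEMU's x86_stq_phys() helper and the env->vm_hsave host-save-area address:

```c
/* Fragment in QEMU's target/i386 context (CPUState *cs, CPUX86State *env):
 * snapshot host registers into the host save area before the guest's
 * VMCB state is loaded, one physical store per field. */
static void save_host_crs_sketch(CPUState *cs, CPUX86State *env)
{
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr0),
                 env->cr[0]);
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr3),
                 env->cr[3]);
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.cr4),
                 env->cr[4]);
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.dr6),
                 env->dr[6]);
}
```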

seg_helper.c
  in handle_even_inj():
     89: uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
    103: x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
    108: env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
  in x86_cpu_exec_interrupt():
    214: + offsetof(struct vmcb, control.int_vector));

excp_helper.c
  in raise_stage2():
    538: env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
  in x86_cpu_tlb_fill():
    644: offsetof(struct vmcb, control.exit_info_2),

/openbmc/linux/arch/x86/kvm/svm/

svm_onhyperv.h
  in svm_hv_is_enlightened_tlb_enabled():
     22: struct hv_vmcb_enlightenments *hve = &to_svm(vcpu)->vmcb->control.hv_enlightenments;
  in svm_hv_init_vmcb():
     28: static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
     30: struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
     32: BUILD_BUG_ON(sizeof(vmcb->control.hv_enlightenments) !=
     33:              sizeof(vmcb->control.reserved_sw));
  in svm_hv_vmcb_dirty_nested_enlightenments():
     73: struct vmcb *vmcb = to_svm(vcpu)->vmcb;
     74: struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
     77: vmcb_mark_dirty(vmcb, HV_VMCB_NESTED_ENLIGHTENMENTS);
  in svm_hv_update_vp_id():
     80: static inline void svm_hv_update_vp_id(struct vmcb *vmcb, struct kvm_vcpu *vcpu)
     82: struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
  [all …]
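
The Hyper-V enlightenments are software-defined state carried in the VMCB's reserved_sw bytes (the BUILD_BUG_ON above pins the two layouts to the same size), so updates participate in the clean-bits protocol like any hardware field. A sketch in the shape of svm_hv_update_vp_id(), assuming the KVM-on-Hyper-V definitions:

```c
/* Only touch the enlightenment, and flag the software-defined clean
 * bit, when the VP id actually changes; unchanged VMCBs stay clean. */
static inline void hv_update_vp_id_sketch(struct vmcb *vmcb, u32 vp_id)
{
	struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;

	if (hve->hv_vp_id != vp_id) {
		hve->hv_vp_id = vp_id;
		vmcb_mark_dirty(vmcb, HV_VMCB_NESTED_ENLIGHTENMENTS);
	}
}
```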

svm.h
    111: struct vmcb *ptr;
    211: struct vmcb *vmcb;
    305: struct vmcb *current_vmcb;
    308: struct vmcb **sev_vmcbs;
  in vmcb_mark_all_dirty():
    342: static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
    344: vmcb->control.clean = 0;
  in vmcb_mark_all_clean():
    347: static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
    349: vmcb->control.clean = VMCB_ALL_CLEAN_MASK
  in vmcb_mark_dirty():
    353: static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
    355: vmcb->control.clean &= ~(1 << bit);
  [all …]
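
The clean field implements VMCB state caching: a set bit tells the CPU that the corresponding VMCB area is unchanged since the last VMRUN on this core and may be served from its cache. A usage sketch pairing a field write with the matching dirty bit, as KVM's svm_set_efer() does (see the svm.c matches below):

```c
/* Any software write to a cached VMCB area must clear the matching
 * clean bit, or the CPU may enter the guest with stale cached state. */
static void set_efer_sketch(struct vcpu_svm *svm, u64 efer)
{
	svm->vmcb->save.efer = efer | EFER_SVME;	/* SVME must stay set */
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);		/* CR area covers EFER */
}
```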

svm.c
  in svm_set_efer():
    344: svm->vmcb->save.efer = efer | EFER_SVME;
    345: vmcb_mark_dirty(svm->vmcb, VMCB_CR);
  in svm_get_interrupt_shadow():
    354: if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
  in svm_set_interrupt_shadow():
    364: svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
    366: svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
  in __svm_skip_emulated_instruction():
    385: if (nrips && svm->vmcb->control.next_rip != 0) {
    387: svm->next_rip = svm->vmcb->control.next_rip;
    400: old_rflags = svm->vmcb->save.rflags;
    406: svm->vmcb->save.rflags = old_rflags;
  in svm_update_soft_interrupt_rip():
    455: svm->soft_int_csbase = svm->vmcb->save.cs.base;
  [all …]
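
The next_rip lines show SVM's NRIPS assist: on exits caused by an instruction, the CPU can report the address of the next instruction, letting KVM skip the trapped instruction without decoding it. A sketch of that fast path (nrips here stands for svm.c's module-level capability flag):

```c
/* Returns true if next_rip lets us skip the trapped instruction
 * without invoking the instruction emulator. */
static bool try_nrips_skip_sketch(struct vcpu_svm *svm)
{
	if (nrips && svm->vmcb->control.next_rip != 0) {
		svm->next_rip = svm->vmcb->control.next_rip;
		return true;	/* caller commits svm->next_rip to RIP */
	}
	return false;		/* fall back: emulate to find the length */
}
```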

nested.c
  in nested_svm_inject_npf_exit():
     40: struct vmcb *vmcb = svm->vmcb;
     42: if (vmcb->control.exit_code != SVM_EXIT_NPF) {
     47: vmcb->control.exit_code = SVM_EXIT_NPF;
     48: vmcb->control.exit_code_hi = 0;
     49: vmcb->control.exit_info_1 = (1ULL << 32);
     50: vmcb->control.exit_info_2 = fault->address;
     53: vmcb->control.exit_info_1 &= ~0xffffffffULL;
     54: vmcb->control.exit_info_1 |= fault->error_code;
  in recalc_intercepts():
    132: vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
    137: c = &svm->vmcb->control;
  [all …]
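
nested_svm_inject_npf_exit() synthesizes a nested-page-fault exit for L1: exit_info_1 carries the error code (with bit 32 marking the synthesized case), exit_info_2 the faulting guest-physical address. A condensed sketch of that shape:

```c
/* Rewrite the current exit as an NPF so L1's handler sees a nested
 * page fault at `address` with `error_code`. Condensed: the real code
 * preserves exit_info_1's upper half when the exit was already NPF. */
static void inject_npf_sketch(struct vmcb *vmcb, u64 address, u64 error_code)
{
	vmcb->control.exit_code    = SVM_EXIT_NPF;
	vmcb->control.exit_code_hi = 0;
	vmcb->control.exit_info_1  = (1ULL << 32) | error_code;
	vmcb->control.exit_info_2  = address;
}
```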

avic.c
  in avic_activate_vmcb():
     87: struct vmcb *vmcb = svm->vmcb01.ptr;
     89: vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
     90: vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
     92: vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
    102: vmcb->control.int_ctl |= X2APIC_MODE_MASK;
    103: vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID;
    114: vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
  in avic_deactivate_vmcb():
    122: struct vmcb *vmcb = svm->vmcb01.ptr;
    124: vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
    125: vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
  [all …]
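
avic_activate_vmcb() shows the AVIC enable sequence on vmcb01: the enable and x2APIC-mode flags live in int_ctl, and avic_physical_id is capped at the largest APIC ID the physical-ID table may index (a larger limit in x2AVIC mode). A sketch of that sequence:

```c
/* Enable AVIC for this VMCB, selecting the xAPIC or x2APIC flavor. */
static void avic_enable_sketch(struct vmcb *vmcb, bool x2apic)
{
	vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
	vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;

	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
	if (x2apic) {
		vmcb->control.int_ctl |= X2APIC_MODE_MASK;
		vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID;
	} else {
		vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
	}
}
```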

hyperv.c
  in svm_hv_inject_synthetic_vmexit_post_tlb_flush():
     13: svm->vmcb->control.exit_code = HV_SVM_EXITCODE_ENL;
     14: svm->vmcb->control.exit_code_hi = 0;
     15: svm->vmcb->control.exit_info_1 = HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH;
     16: svm->vmcb->control.exit_info_2 = 0;

svm_onhyperv.c
  in svm_hv_enable_l2_tlb_flush():
     30: hve = &to_svm(vcpu)->vmcb->control.hv_enlightenments;
     36: vmcb_mark_dirty(to_svm(vcpu)->vmcb, HV_VMCB_NESTED_ENLIGHTENMENTS);

sev.c
  in sev_es_sync_vmsa():
     584: if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))
     593: memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save));
  in sev_migrate_from():
    1769: dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
    1770: dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
    1774: src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
    1775: src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
  in dump_ghcb():
    2415: pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
  in sev_es_sync_from_ghcb():
    2448: struct vmcb_control_area *control = &svm->vmcb->control;
    2476: svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
  in sev_es_validate_vmgexit():
    2502: struct vmcb_control_area *control = &svm->vmcb->control;
  [all …]

/openbmc/linux/tools/testing/selftests/kvm/lib/x86_64/

svm.c
  in vcpu_alloc_svm():
     37: svm->vmcb = (void *)vm_vaddr_alloc_page(vm);
     38: svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);
     39: svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);
  in generic_svm_setup():
     65: struct vmcb *vmcb = svm->vmcb;
     67: struct vmcb_save_area *save = &vmcb->save;
     68: struct vmcb_control_area *ctrl = &vmcb->control;
     79: memset(vmcb, 0, sizeof(*vmcb));
    103: vmcb->save.rip = (u64)guest_rip;
    104: vmcb->save.rsp = (u64)guest_rsp;
  in run_guest():
    135: void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
  [all …]
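
vcpu_alloc_svm() allocates the VMCB in guest virtual memory and records its GVA/HVA/GPA aliases; generic_svm_setup() then seeds the VMCB from L1's own register state before the first VMRUN. A hypothetical host-side skeleton wiring these together, assuming the usual selftest entry points (vm_create_with_one_vcpu(), vcpu_args_set(), kvm_cpu_has()) and an l1_guest_code() such as the vmcall sketch earlier in this listing:

```c
#include "kvm_util.h"
#include "svm_util.h"

/* Hypothetical test skeleton: host side only; l1_guest_code() is an
 * L1 body like the svm_vmcall sketch above. */
int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	vm_vaddr_t svm_gva;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
	vcpu_alloc_svm(vm, &svm_gva);	/* fills a struct svm_test_data */
	vcpu_args_set(vcpu, 1, svm_gva);

	vcpu_run(vcpu);		/* single run; real tests loop over ucalls */
	kvm_vm_free(vm);
	return 0;
}
```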

/openbmc/linux/tools/testing/selftests/kvm/include/x86_64/

svm_util.h
     20: struct vmcb *vmcb; /* gva */
     61: void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);

svm.h
    240: struct __attribute__ ((__packed__)) vmcb {

/openbmc/qemu/target/i386/

svm.h
    236: struct QEMU_PACKED vmcb {

/openbmc/linux/arch/x86/kvm/

trace.h
    601: TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
    604: TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled,
    609: __field( __u64, vmcb )
    620: __entry->vmcb = vmcb;
    633: __entry->vmcb,