/openbmc/qemu/target/i386/hvf/
vmx.h
     92  static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)  [in enter_long_mode(), argument]
     96  efer |= MSR_EFER_LMA;  [in enter_long_mode()]
     97  wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);  [in enter_long_mode()]
    102  if ((efer & MSR_EFER_LME) &&  [in enter_long_mode()]
    109  static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)  [in exit_long_mode(), argument]
    116  efer &= ~MSR_EFER_LMA;  [in exit_long_mode()]
    117  wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);  [in exit_long_mode()]
    124  uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);  [in macvm_set_cr0(), local]
    132  !(efer & MSR_EFER_LME)) {  [in macvm_set_cr0()]
    145  if (efer & MSR_EFER_LME) {  [in macvm_set_cr0()]
    [all …]
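The two HVF helpers above flip EFER.LMA and push the result into the VMCS guest-EFER field. A minimal standalone sketch of that pattern, assuming a ToyVmcs struct in place of the real hv_vcpuid_t handle and wvmcs() accessor:

    #include <stdint.h>

    #define MSR_EFER_LME (1ULL << 8)   /* long mode enable */
    #define MSR_EFER_LMA (1ULL << 10)  /* long mode active */

    /* Stand-in for the VMCS; the real code writes VMCS_GUEST_IA32_EFER
     * through Hypervisor.framework's wvmcs(). */
    typedef struct { uint64_t guest_efer; } ToyVmcs;

    static void toy_enter_long_mode(ToyVmcs *v, uint64_t efer)
    {
        efer |= MSR_EFER_LMA;    /* LME + paging on => long mode active */
        v->guest_efer = efer;
    }

    static void toy_exit_long_mode(ToyVmcs *v, uint64_t efer)
    {
        efer &= ~MSR_EFER_LMA;   /* paging off => long mode inactive */
        v->guest_efer = efer;
    }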
|
x86hvf.c
     99  wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, env->efer);  [in hvf_put_segments()]
    208  env->efer = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER);  [in hvf_get_segments()]
|
/openbmc/qemu/target/i386/tcg/system/
svm_helper.c
     89  if (!(env->efer & MSR_EFER_SVME)) {  [in is_efer_invalid_state()]
     93  if (env->efer & MSR_EFER_RESERVED) {  [in is_efer_invalid_state()]
     97  if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&  [in is_efer_invalid_state()]
    102  if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)  [in is_efer_invalid_state()]
    107  if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)  [in is_efer_invalid_state()]
    112  if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)  [in is_efer_invalid_state()]
    136  if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) {  [in virtual_vm_load_save_enabled()]
    211  env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);  [in helper_vmrun()]
    317  if ((env->efer & MSR_EFER_LMA) &&  [in helper_vmrun()]
    346  env->vm_vmcb + offsetof(struct vmcb, save.efer)));  [in helper_vmrun()]
    [all …]
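is_efer_invalid_state() encodes the EFER consistency checks that VMRUN performs before entering a guest (the AMD APM lists them among the VMRUN consistency checks). A standalone sketch of the checks visible in the excerpt; the reserved-bits test is noted in a comment, and the segment flags are simplified stand-ins:

    #include <stdint.h>
    #include <stdbool.h>

    #define MSR_EFER_LME   (1ULL << 8)
    #define MSR_EFER_LMA   (1ULL << 10)
    #define MSR_EFER_SVME  (1ULL << 12)
    #define CR0_PE_MASK    (1U << 0)
    #define CR0_PG_MASK    (1U << 31)
    #define CR4_PAE_MASK   (1U << 5)

    /* True when the guest EFER state would make VMRUN fail; the real
     * helper also rejects set reserved bits (MSR_EFER_RESERVED) and
     * LME/LMA on CPUs without long mode. */
    static bool toy_efer_invalid(uint64_t efer, uint64_t cr0, uint64_t cr4,
                                 bool cs_long, bool cs_db)
    {
        if (!(efer & MSR_EFER_SVME)) {
            return true;              /* SVME must remain set inside a guest */
        }
        if ((efer & MSR_EFER_LME) && (cr0 & CR0_PG_MASK)) {
            if (!(cr4 & CR4_PAE_MASK)) {
                return true;          /* long-mode paging requires PAE */
            }
            if (!(cr0 & CR0_PE_MASK)) {
                return true;          /* ...and protected mode */
            }
            if (cs_long && cs_db) {
                return true;          /* CS.L=1 with CS.D=1 is illegal */
            }
        }
        return false;
    }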
|
misc_helper.c
     92  if ((env->efer & MSR_EFER_LMA) &&  [in helper_write_crN()]
     96  if (!(env->efer & MSR_EFER_LMA)) {  [in helper_write_crN()]
    188  cpu_load_efer(env, (env->efer & ~update_mask) |  [in helper_wrmsr()]
    350  val = env->efer;  [in helper_rdmsr()]
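helper_wrmsr() merges a guest write into EFER through an update mask, so only the bits the virtual CPU advertises can change. A hedged sketch; which bits belong in update_mask depends on CPUID features in the real code, so the set below is illustrative:

    #include <stdint.h>

    #define MSR_EFER_SCE   (1ULL << 0)
    #define MSR_EFER_LME   (1ULL << 8)
    #define MSR_EFER_NXE   (1ULL << 11)
    #define MSR_EFER_SVME  (1ULL << 12)

    /* Keep unwritable bits from the old value, take writable bits
     * from the guest's WRMSR payload. */
    static uint64_t toy_wrmsr_efer(uint64_t old_efer, uint64_t wrmsr_val)
    {
        uint64_t update_mask = MSR_EFER_SCE | MSR_EFER_LME |
                               MSR_EFER_NXE | MSR_EFER_SVME;
        return (old_efer & ~update_mask) | (wrmsr_val & update_mask);
    }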
|
seg_helper.c
     34  if (!(env->efer & MSR_EFER_SCE)) {  [in helper_syscall()]
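SYSCALL is only legal when EFER.SCE is set; helper_syscall() raises #UD otherwise. A tiny sketch of the gate, with the exception modeled as a boolean instead of QEMU's exception machinery:

    #include <stdint.h>
    #include <stdbool.h>

    #define MSR_EFER_SCE (1ULL << 0)   /* SYSCALL/SYSRET enable */

    /* The real helper raises #UD when this returns false. */
    static bool toy_syscall_permitted(uint64_t efer)
    {
        return (efer & MSR_EFER_SCE) != 0;
    }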
|
smm_helper.c
     86  x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);  [in do_smm_enter()]
|
/openbmc/qemu/target/i386/
helper.c
    146  (env->efer & MSR_EFER_LME)) {  [in cpu_x86_update_cr0()]
    151  env->efer |= MSR_EFER_LMA;  [in cpu_x86_update_cr0()]
    154  (env->efer & MSR_EFER_LMA)) {  [in cpu_x86_update_cr0()]
    156  env->efer &= ~MSR_EFER_LMA;  [in cpu_x86_update_cr0()]
    631  env->efer = val;  [in cpu_load_efer()]
    633  if (env->efer & MSR_EFER_LMA) {  [in cpu_load_efer()]
    636  if (env->efer & MSR_EFER_SVME) {  [in cpu_load_efer()]
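cpu_x86_update_cr0() is where LMA actually flips: setting CR0.PG with EFER.LME already on activates long mode, and clearing PG deactivates it (cpu_load_efer() then folds the result into hflags). A standalone sketch of that transition, with ToyX86State standing in for CPUX86State:

    #include <stdint.h>

    #define MSR_EFER_LME (1ULL << 8)
    #define MSR_EFER_LMA (1ULL << 10)
    #define CR0_PG_MASK  (1U << 31)

    typedef struct { uint64_t efer; uint32_t cr0; } ToyX86State;

    static void toy_update_cr0(ToyX86State *env, uint32_t new_cr0)
    {
        if (!(env->cr0 & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
            (env->efer & MSR_EFER_LME)) {
            env->efer |= MSR_EFER_LMA;     /* paging enabled: enter long mode */
        } else if ((env->cr0 & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
                   (env->efer & MSR_EFER_LMA)) {
            env->efer &= ~MSR_EFER_LMA;    /* paging disabled: leave long mode */
        }
        env->cr0 = new_cr0;
    }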
|
svm.h
    205  uint64_t efer;  [member]
|
sev.h
     81  uint64_t efer;  [member]
|
gdbstub.c
    225  return gdb_read_reg_cs64(env->hflags, mem_buf, env->efer);  [in x86_cpu_gdb_read_register()]
|
machine.c
   1444  return env->efer != 0;  [in intel_efer32_needed()]
   1453  VMSTATE_UINT64(env.efer, X86CPU),
   1694  VMSTATE_UINT64(env.efer, X86CPU),
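intel_efer32_needed() is a vmstate subsection predicate: the EFER field is shipped only when it is nonzero, so migration streams from CPUs that never touched EFER keep their old layout. A standalone sketch of the only-when-needed idea, with a plain FILE stream standing in for QEMU's migration machinery (an assumption, not the real API):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { uint64_t efer; } ToyEnv;

    /* Mirrors intel_efer32_needed(). */
    static bool efer_section_needed(const ToyEnv *env)
    {
        return env->efer != 0;
    }

    static void toy_save_cpu(FILE *out, const ToyEnv *env)
    {
        if (efer_section_needed(env)) {
            fwrite(&env->efer, sizeof(env->efer), 1, out);  /* optional subsection */
        }
    }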
|
cpu-dump.c
    480  qemu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);  [in x86_cpu_dump_state()]
|
sev.c
    408  cpu_load_efer(env, launch_vmsa->vmsa.efer);  [in sev_apply_cpu_context()]
    557  vmsa_check.efer = 0;  [in check_vmsa_supported()]
   2193  vmsa->efer = env->efer;  [in initialize_vmsa()]
|
cpu.h
   1869  uint64_t efer;  [member]
|
cpu.c
   9866  if (env->efer & MSR_EFER_LMA) {  [in x86_update_hflags()]
|
/openbmc/qemu/bsd-user/x86_64/
target_arch_cpu.h
     45  env->efer |= MSR_EFER_LMA | MSR_EFER_LME;  [in target_cpu_init()]
|
/openbmc/qemu/linux-headers/asm-x86/
kvm.h
    151  __u64 efer;  [member]
    162  __u64 efer;  [member]
|
/openbmc/qemu/target/i386/tcg/
seg_helper.c
    107  if (env->efer & MSR_EFER_NXE) {  [in get_pg_mode()]
   1088  if (!(env->efer & MSR_EFER_SCE)) {  [in helper_sysret()]
   1531  if (env->efer & MSR_EFER_LMA) {  [in helper_ljmp_protected()]
   1562  if (env->efer & MSR_EFER_LMA) {  [in helper_ljmp_protected()]
   1591  if (env->efer & MSR_EFER_LMA) {  [in helper_ljmp_protected()]
   1743  if (env->efer & MSR_EFER_LMA) {  [in helper_lcall_protected()]
   1780  if (env->efer & MSR_EFER_LMA) {  [in helper_lcall_protected()]
   1809  if (env->efer & MSR_EFER_LMA) {  [in helper_lcall_protected()]
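Two distinct uses appear here: get_pg_mode() folds EFER.NXE into the paging-mode mask so the MMU honors the NX page-table bit, while the far-jmp/call helpers branch on EFER.LMA to pick long-mode descriptor handling. A sketch of the mode-mask part; the TOY_PG_MODE_* flags are stand-ins modeled on QEMU's PG_MODE_* set:

    #include <stdint.h>

    #define MSR_EFER_LMA  (1ULL << 10)
    #define MSR_EFER_NXE  (1ULL << 11)

    /* Stand-in paging-mode flags. */
    #define TOY_PG_MODE_NXE  (1 << 0)
    #define TOY_PG_MODE_LMA  (1 << 1)

    /* Condense the EFER bits the MMU cares about into a mode mask,
     * in the spirit of get_pg_mode(). */
    static int toy_pg_mode(uint64_t efer)
    {
        int mode = 0;
        if (efer & MSR_EFER_NXE) {
            mode |= TOY_PG_MODE_NXE;   /* honor the NX bit in PTEs */
        }
        if (efer & MSR_EFER_LMA) {
            mode |= TOY_PG_MODE_LMA;   /* 4-level long-mode page tables */
        }
        return mode;
    }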
|
fpu_helper.c
   2702  if (!(env->efer & MSR_EFER_FFXSR)  [in do_fxsave()]
   2951  if (!(env->efer & MSR_EFER_FFXSR)  [in do_fxrstor()]
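do_fxsave()/do_fxrstor() consult EFER.FFXSR (Fast FXSAVE/FXRSTOR): when it is set, FXSAVE/FXRSTOR at CPL 0 in 64-bit mode skip the XMM register image. A sketch of just the gating condition:

    #include <stdint.h>
    #include <stdbool.h>

    #define MSR_EFER_FFXSR (1ULL << 14)   /* Fast FXSAVE/FXRSTOR */

    /* True when FXSAVE/FXRSTOR must still copy the XMM registers. */
    static bool toy_fx_touches_xmm(uint64_t efer, int cpl, bool in_64bit)
    {
        return !((efer & MSR_EFER_FFXSR) && cpl == 0 && in_64bit);
    }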
|
/openbmc/qemu/linux-user/i386/
cpu_loop.c
    351  env->efer |= MSR_EFER_LMA | MSR_EFER_LME;  [in target_cpu_copy_regs()]
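Both user-mode frontends (here and in bsd-user's target_arch_cpu.h above) pre-set LMA|LME: an x86_64 user-mode guest starts life already in long mode instead of switching into it the architectural way. A sketch of that initialization:

    #include <stdint.h>

    #define MSR_EFER_LME (1ULL << 8)
    #define MSR_EFER_LMA (1ULL << 10)

    typedef struct { uint64_t efer; } ToyEnv;

    /* User-mode emulation never boots through real mode, so long mode
     * is declared active from the first instruction. */
    static void toy_user_cpu_init(ToyEnv *env)
    {
        env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
    }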
|
/openbmc/qemu/target/i386/kvm/
kvm.c
   3568  sregs.efer = env->efer;  [in kvm_put_sregs()]
   3615  sregs.efer = env->efer;  [in kvm_put_sregs2()]
   4347  env->efer = sregs.efer;  [in kvm_get_sregs()]
   4348  if (sev_es_enabled() && env->efer & MSR_EFER_LME &&  [in kvm_get_sregs()]
   4350  env->efer |= MSR_EFER_LMA;  [in kvm_get_sregs()]
   4390  env->efer = sregs.efer;  [in kvm_get_sregs2()]
   4391  if (sev_es_enabled() && env->efer & MSR_EFER_LME &&  [in kvm_get_sregs2()]
   4393  env->efer |= MSR_EFER_LMA;  [in kvm_get_sregs2()]
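kvm_get_sregs*() includes a SEV-ES workaround: the hypervisor cannot read an encrypted vCPU's true state, so QEMU re-derives EFER.LMA instead of trusting what KVM returned. The excerpt truncates the condition; pairing LME with CR0.PG below is an assumption about the missing part, based on what the architecture implies:

    #include <stdint.h>
    #include <stdbool.h>

    #define MSR_EFER_LME (1ULL << 8)
    #define MSR_EFER_LMA (1ULL << 10)
    #define CR0_PG_MASK  (1U << 31)

    /* Reconstruct LMA for an opaque SEV-ES guest: paging on with LME
     * set means long mode must be active. */
    static uint64_t toy_sev_es_fixup_efer(uint64_t efer, uint64_t cr0,
                                          bool sev_es)
    {
        if (sev_es && (efer & MSR_EFER_LME) && (cr0 & CR0_PG_MASK)) {
            efer |= MSR_EFER_LMA;
        }
        return efer;
    }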
|
/openbmc/qemu/target/i386/nvmm/
nvmm-all.c
    171  state->msrs[NVMM_X64_MSR_EFER] = env->efer;  [in nvmm_set_registers()]
    326  env->efer = state->msrs[NVMM_X64_MSR_EFER];  [in nvmm_get_registers()]
|
/openbmc/qemu/target/i386/whpx/
whpx-all.c
    512  vcxt.values[idx++].Reg64 = env->efer;  [in whpx_set_registers()]
    733  env->efer = vcxt.values[idx++].Reg64;  [in whpx_get_registers()]
|