/* QEMU macOS Hypervisor.framework (hvf) support for x86 -- excerpts */
/*
 * ...
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * ...
 */

/* ... */
#include "qemu/error-report.h"
#include "hvf-i386.h"
#include "qemu/main-loop.h"
#include "target/i386/cpu.h"
/* ... */

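/*
 * Push the QEMU APIC's task priority into the vCPU and program the TPR
 * threshold: when an interrupt is pending (irr != -1), a threshold of
 * min(irr, tpr) >> 4 makes the guest exit as soon as it lowers TPR far
 * enough for that interrupt to become deliverable.
 */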
void vmx_update_tpr(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
    int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);

    wreg(cpu->accel->fd, HV_X86_TPR, tpr);
    if (irr == -1) {
        wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, 0);
    } else {
        wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
              irr >> 4);
    }
}

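/* Reflect a TPR value the guest changed while in the VM back into QEMU's APIC. */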
static void update_apic_tpr(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = rreg(cpu->accel->fd, HV_X86_TPR) >> 4;

    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
}

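/*
 * ept_emulation_fault() (excerpted) classifies an EPT violation: write
 * faults on dirty-logged slots are folded into the dirty bitmap, and the
 * fault is routed to MMIO emulation only when the slot is not plain RAM
 * (or ROMD being read).
 */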
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
{
    /* ... */
    read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
    write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
    /* ... */
    if (slot->flags & HVF_SLOT_LOG) {
        uint64_t dirty_page_start = gpa & ~(TARGET_PAGE_SIZE - 1u);
        memory_region_set_dirty(slot->region, gpa - slot->start, 1);
        /* ... */
    }
    /* ... a guest-physical address that is a translation of a guest-linear
     * address ... */
    if (!memory_region_is_ram(slot->region) &&
        !(read && memory_region_is_romd(slot->region))) {
        return false;
    }
    return true;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    g_free(env->hvf_mmio_buf);
}

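/*
 * TSC and APIC bus frequency discovery. The elided lines query host
 * sysctls; tsc_freq and bus_freq below are the locals they fill in.
 */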
static void init_tsc_freq(CPUX86State *env)
{
    if (env->tsc_khz != 0) {
        return;
    }
    /* ... */
    env->tsc_khz = tsc_freq / 1000; /* Hz to kHz */
}

static void init_apic_bus_freq(CPUX86State *env)
{
    if (env->apic_bus_freq != 0) {
        return;
    }
    /* ... */
    env->apic_bus_freq = bus_freq;
}

static bool tsc_is_known(CPUX86State *env) { return env->tsc_khz != 0; }
static bool apic_bus_freq_is_known(CPUX86State *env) { return env->apic_bus_freq != 0; }

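/* Kicking a vCPU both wakes its thread and forces hv_vcpu_run to return. */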
void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    hv_vcpu_interrupt(&cpu->accel->fd, 1);
}

int hvf_arch_init_vcpu(CPUState *cpu)
{
    X86CPU *x86cpu = X86_CPU(cpu);
    CPUX86State *env = &x86cpu->env;
    /* ... */
    if (hvf_state->hvf_caps == NULL) {
        hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
    }
    env->hvf_mmio_buf = g_new(char, 4096);

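    /*
     * vmware-cpuid-freq needs both the TSC and the APIC bus frequency;
     * the elided lines call init_tsc_freq()/init_apic_bus_freq() and warn
     * only if either frequency is still unknown afterwards.
     */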
    if (x86cpu->vmware_cpuid_freq) {
        /* ... */
        error_report("vmware-cpuid-freq: feature couldn't be enabled");
        /* ... */
    }

    if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
        invtsc_mig_blocker == NULL) {
        error_setg(&invtsc_mig_blocker,
                   "State blocked by non-migratable CPU device (invtsc flag)");
        /* ... */
    }

    if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
                               &hvf_state->hvf_caps->vmx_cap_pinbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,
                               &hvf_state->hvf_caps->vmx_cap_procbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,
                               &hvf_state->hvf_caps->vmx_cap_procbased2)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,
                               &hvf_state->hvf_caps->vmx_cap_entry)) {
        abort();
    }

    /* set VMCS control fields */
    wvmcs(cpu->accel->fd, VMCS_PIN_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
                   /* ... */));
    wvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
                   /* ... */));
    /* ... reqCap assembled from optional secondary controls ... */
    wvmcs(cpu->accel->fd, VMCS_SEC_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2, reqCap));

    wvmcs(cpu->accel->fd, VMCS_ENTRY_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, 0));
    wvmcs(cpu->accel->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */

    wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, 0);

    x86cpu = X86_CPU(cpu);
    x86cpu->env.xsave_buf_len = 4096;
    x86cpu->env.xsave_buf = qemu_memalign(4096, x86cpu->env.xsave_buf_len);
    assert(hvf_get_supported_cpuid(0xd, 0, R_ECX) <= x86cpu->env.xsave_buf_len);

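    /*
     * These MSRs are handled natively by Hypervisor.framework, so guest
     * reads and writes to them do not trap out to QEMU.
     */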
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_STAR, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_LSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_CSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_FMASK, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_FSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_GSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_KERNELGSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_TSC_AUX, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_TSC, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_CS, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_EIP, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_ESP, 1);

    return 0;
}

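/*
 * After a VM exit, capture any interrupt or exception that was being
 * delivered when the exit occurred (IDT-vectoring info) plus the guest's
 * NMI/IRQ blocking state, so injection can be replayed on the next entry.
 */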
static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_info)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->interrupt_injected = -1;
    env->nmi_injected = false;
    env->ins_len = 0;
    env->has_error_code = false;
    if (idtvec_info & VMCS_IDT_VEC_VALID) {
        switch (idtvec_info & VMCS_IDT_VEC_TYPE) {
        /* ... hardware/software interrupt ... */
            env->interrupt_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
            break;
        /* ... NMI ... */
            env->nmi_injected = true;
            break;
        /* ... hardware/software exception ... */
            env->exception_nr = idtvec_info & VMCS_IDT_VEC_VECNUM;
            env->exception_injected = 1;
            break;
        /* ... */
        }
        /* ... software events additionally record the instruction length: */
            env->ins_len = ins_len;
        /* ... */
        if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
            env->has_error_code = true;
            env->error_code = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_ERROR);
        }
    }
    if ((rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY) &
         VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }
    if (rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY) &
        (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
         VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags |= HF_INHIBIT_IRQ_MASK;
    } else {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    }
}

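/*
 * CPUID wrapper: ordinary leaves go to cpu_x86_cpuid(); the VMware leaves
 * are synthesized here so the guest can read the TSC and APIC bus
 * frequencies directly.
 */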
static void hvf_cpu_x86_cpuid(CPUX86State *env, /* ... */)
{
    /*
     * A wrapper extends cpu_x86_cpuid with the 0x40000000 and 0x40000010
     * leaves; leaves 0x40000001-0x4000000F are filled with zeros.
     * Provides vmware-cpuid-freq support to hvf.
     */
    /* ... in the 0x40000010 leaf: */
        *eax = env->tsc_khz;
        *ebx = env->apic_bus_freq / 1000; /* Hz to kHz */
    /* ... */
}

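/*
 * GPR synchronization between QEMU's CPUX86State and the hvf vCPU, used
 * around instruction emulation; RAX..R15 map onto HV_X86_RAX + i.
 */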
static void hvf_load_regs(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    int i = 0;
    RRX(env, R_EAX) = rreg(cs->accel->fd, HV_X86_RAX);
    RRX(env, R_EBX) = rreg(cs->accel->fd, HV_X86_RBX);
    RRX(env, R_ECX) = rreg(cs->accel->fd, HV_X86_RCX);
    RRX(env, R_EDX) = rreg(cs->accel->fd, HV_X86_RDX);
    RRX(env, R_ESI) = rreg(cs->accel->fd, HV_X86_RSI);
    RRX(env, R_EDI) = rreg(cs->accel->fd, HV_X86_RDI);
    RRX(env, R_ESP) = rreg(cs->accel->fd, HV_X86_RSP);
    RRX(env, R_EBP) = rreg(cs->accel->fd, HV_X86_RBP);
    for (i = 8; i < 16; i++) {
        RRX(env, i) = rreg(cs->accel->fd, HV_X86_RAX + i);
    }

    env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
    /* ... */
    env->eip = rreg(cs->accel->fd, HV_X86_RIP);
}

static void hvf_store_regs(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    int i = 0;
    wreg(cs->accel->fd, HV_X86_RAX, RAX(env));
    wreg(cs->accel->fd, HV_X86_RBX, RBX(env));
    wreg(cs->accel->fd, HV_X86_RCX, RCX(env));
    wreg(cs->accel->fd, HV_X86_RDX, RDX(env));
    wreg(cs->accel->fd, HV_X86_RSI, RSI(env));
    wreg(cs->accel->fd, HV_X86_RDI, RDI(env));
    wreg(cs->accel->fd, HV_X86_RBP, RBP(env));
    wreg(cs->accel->fd, HV_X86_RSP, RSP(env));
    for (i = 8; i < 16; i++) {
        wreg(cs->accel->fd, HV_X86_RAX + i, RRX(env, i));
    }

    /* ... */
    wreg(cs->accel->fd, HV_X86_RFLAGS, env->eflags);
    macvm_set_rip(cs, env->eip);
}

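/*
 * Software emulation of RDMSR for MSRs that Hypervisor.framework does not
 * handle natively; the result is returned to the guest in RAX:RDX.
 */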
void hvf_simulate_rdmsr(CPUX86State *env)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t msr = ECX(env);
    uint64_t val = 0;

    switch (msr) {
    case MSR_IA32_TSC:
        val = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET);
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(cpu->apic_state);
        break;
    case MSR_APIC_START ... MSR_APIC_END: {
        int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
        /* ... */
        break;
    }
    case MSR_IA32_UCODE_REV:
        val = cpu->ucode_rev;
        break;
    case MSR_EFER:
        val = rvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER);
        break;
    case MSR_FSBASE:
        val = rvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE);
        break;
    case MSR_GSBASE:
        val = rvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE);
        break;
    case MSR_KERNELGSBASE:
        val = rvmcs(cs->accel->fd, VMCS_HOST_FS_BASE);
        break;
    /* ... */
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    /* ... MSR_MTRRphysBase(2..7) ... */
        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    /* ... */
        val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    /* ... MSR_MTRRfix16K_80000 / _A0000 ... */
        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
        break;
    /* ... MSR_MTRRfix4K_C0000 .. _F8000 ... */
        val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_CORE_THREAD_COUNT:
        val = cpu_x86_get_msr_core_thread_count(cpu);
        break;
    /* ... */
    }

    RAX(env) = (uint32_t)val;
    RDX(env) = (uint32_t)(val >> 32);
}

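/*
 * Software emulation of WRMSR: writes that change segment bases or EFER go
 * straight into the VMCS, and a registered hypervisor interface handler,
 * if any, gets a chance to observe the write at the end.
 */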
void hvf_simulate_wrmsr(CPUX86State *env)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t msr = ECX(env);
    uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);

    switch (msr) {
    case MSR_IA32_APICBASE: {
        int r = cpu_set_apic_base(cpu->apic_state, data);
        /* ... */
        break;
    }
    case MSR_APIC_START ... MSR_APIC_END: {
        int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START;
        /* ... */
        break;
    }
    case MSR_FSBASE:
        wvmcs(cs->accel->fd, VMCS_GUEST_FS_BASE, data);
        break;
    case MSR_GSBASE:
        wvmcs(cs->accel->fd, VMCS_GUEST_GS_BASE, data);
        break;
    case MSR_KERNELGSBASE:
        wvmcs(cs->accel->fd, VMCS_HOST_FS_BASE, data);
        break;
    /* ... */
    case MSR_EFER:
        /* ... */
        wvmcs(cs->accel->fd, VMCS_GUEST_IA32_EFER, data);
        if (data & MSR_EFER_NXE) {
            hv_vcpu_invalidate_tlb(cs->accel->fd);
        }
        break;
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    /* ... */
        env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    /* ... */
        env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
        break;
    /* ... MSR_MTRRfix16K_80000 / _A0000 ... */
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
        break;
    /* ... MSR_MTRRfix4K_C0000 .. _F8000 ... */
        env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = data;
        break;
    /* ... */
    }

    if (g_hypervisor_iface) {
        g_hypervisor_iface->wrmsr_handler(cs, msr, data);
        /* ... */
    }
}

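/*
 * Main vCPU run loop: flush dirty register state into the VM, enter the
 * guest with hv_vcpu_run_until(), then read the VMCS exit information and
 * dispatch on the exit reason until something needs the outer loop
 * (EXCP_HLT, EXCP_INTERRUPT, ...).
 */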
int hvf_vcpu_exec(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret = 0;
    uint64_t rip = 0;

    if (hvf_process_events(cpu)) {
        return EXCP_HLT;
    }

    do {
        if (cpu->accel->dirty) {
            hvf_put_registers(cpu);
            cpu->accel->dirty = false;
        }

        if (hvf_inject_interrupts(cpu)) {
            return EXCP_INTERRUPT;
        }
        vmx_update_tpr(cpu);

        /* ... */
        if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
            /* ... */
            return EXCP_HLT;
        }

        hv_return_t r = hv_vcpu_run_until(cpu->accel->fd, HV_DEADLINE_FOREVER);
        assert_hvf_ok(r);

        /* handle VMEXIT */
        uint64_t exit_reason = rvmcs(cpu->accel->fd, VMCS_EXIT_REASON);
        uint64_t exit_qual = rvmcs(cpu->accel->fd, VMCS_EXIT_QUALIFICATION);
        uint32_t ins_len = (uint32_t)rvmcs(cpu->accel->fd,
                                           VMCS_EXIT_INSTRUCTION_LENGTH);
        uint64_t idtvec_info = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);

        hvf_store_events(cpu, ins_len, idtvec_info);
        rip = rreg(cpu->accel->fd, HV_X86_RIP);
        env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS);

        /* ... */
        update_apic_tpr(cpu);
        current_cpu = cpu;

        ret = 0;
        switch (exit_reason) {
        case EXIT_REASON_HLT: {
            macvm_set_rip(cpu, rip + ins_len);
            if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                  (env->eflags & IF_MASK))
                && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
                !(idtvec_info & VMCS_IDT_VEC_VALID)) {
                cpu->halted = 1;
                ret = EXCP_HLT;
                break;
            }
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_EPT_FAULT: {
            hvf_slot *slot;
            uint64_t gpa = rvmcs(cpu->accel->fd, VMCS_GUEST_PHYSICAL_ADDRESS);

            if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
                ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
                vmx_set_nmi_blocking(cpu);
            }

            slot = hvf_find_overlap_slot(gpa, 1);
            /* mmio */
            if (ept_emulation_fault(slot, gpa, exit_qual)) {
                struct x86_decode decode;

                hvf_load_regs(cpu);
                decode_instruction(env, &decode);
                exec_instruction(env, &decode);
                hvf_store_regs(cpu);
            }
            break;
        }
        case EXIT_REASON_INOUT: {
            uint32_t in = (exit_qual & 8) != 0;
            uint32_t size = (exit_qual & 7) + 1;
            uint32_t string = (exit_qual & 16) != 0;
            uint32_t port = exit_qual >> 16;

            if (!string && in) {
                uint64_t val = 0;
                hvf_load_regs(cpu);
                hvf_handle_io(env_cpu(env), port, &val, 0, size, 1);
                if (size == 1) {
                    AL(env) = val;
                } else {
                    /* ... wider sizes update AX/EAX/RAX ... */
                }
                env->eip += ins_len;
                hvf_store_regs(cpu);
                break;
            } else if (!string && !in) {
                RAX(env) = rreg(cpu->accel->fd, HV_X86_RAX);
                hvf_handle_io(env_cpu(env), port, &RAX(env), 1, size, 1);
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            /* string I/O falls back to the instruction emulator */
            hvf_load_regs(cpu);
            /* ... decode_instruction() / exec_instruction() ... */
            hvf_store_regs(cpu);
            break;
        }
        case EXIT_REASON_CPUID: {
            uint32_t rax = (uint32_t)rreg(cpu->accel->fd, HV_X86_RAX);
            uint32_t rbx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RBX);
            uint32_t rcx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RCX);
            uint32_t rdx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RDX);

            if (rax == 1) {
                /* CPUID1.ecx.OSXSAVE needs the current CR4 value */
                env->cr[4] = rvmcs(cpu->accel->fd, VMCS_GUEST_CR4);
            }
            /* ... */
            wreg(cpu->accel->fd, HV_X86_RAX, rax);
            wreg(cpu->accel->fd, HV_X86_RBX, rbx);
            wreg(cpu->accel->fd, HV_X86_RCX, rcx);
            wreg(cpu->accel->fd, HV_X86_RDX, rdx);

            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_XSETBV: {
            uint32_t eax = (uint32_t)rreg(cpu->accel->fd, HV_X86_RAX);
            uint32_t ecx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RCX);
            uint32_t edx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RDX);

            if (ecx) {
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            env->xcr0 = ((uint64_t)edx << 32) | eax;
            wreg(cpu->accel->fd, HV_X86_XCR0, env->xcr0 | 1);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_INTR_WINDOW:
            vmx_clear_int_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_NMI_WINDOW:
            vmx_clear_nmi_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        /* ... */
        case EXIT_REASON_RDMSR:
        case EXIT_REASON_WRMSR: {
            hvf_load_regs(cpu);
            if (exit_reason == EXIT_REASON_RDMSR) {
                hvf_simulate_rdmsr(env);
            } else {
                hvf_simulate_wrmsr(env);
            }
            env->eip += ins_len;
            hvf_store_regs(cpu);
            break;
        }
        case EXIT_REASON_CR_ACCESS: {
            int cr = exit_qual & 15;
            int reg = (exit_qual >> 8) & 15;

            hvf_load_regs(cpu);
            switch (cr) {
            case 0x0:
                macvm_set_cr0(cpu->accel->fd, RRX(env, reg));
                break;
            case 4:
                macvm_set_cr4(cpu->accel->fd, RRX(env, reg));
                break;
            case 8:
                if (exit_qual & 0x10) {
                    RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
                } else {
                    int tpr = RRX(env, reg);
                    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
                    ret = EXCP_INTERRUPT;
                }
                break;
            default:
                /* ... */
                abort();
            }
            env->eip += ins_len;
            hvf_store_regs(cpu);
            break;
        }
        case EXIT_REASON_APIC_ACCESS: {
            struct x86_decode decode;

            hvf_load_regs(cpu);
            decode_instruction(env, &decode);
            exec_instruction(env, &decode);
            hvf_store_regs(cpu);
            break;
        }
        case EXIT_REASON_TPR:
            ret = 1;
            break;
        case EXIT_REASON_TASK_SWITCH: {
            uint64_t vinfo = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);
            /* ... */
            vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
                                   vinfo & VMCS_INTR_VALID,
                                   vinfo & VECTORING_INFO_VECTOR_MASK,
                                   vinfo & VMCS_INTR_T_MASK);
            break;
        }
        /* ... */
        case EXIT_REASON_RDPMC:
            wreg(cpu->accel->fd, HV_X86_RAX, 0);
            wreg(cpu->accel->fd, HV_X86_RDX, 0);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        /* ... VMX instructions inject #GP ... */
            env->exception_nr = EXCP0D_GPF;
            env->exception_injected = 1;
            env->has_error_code = true;
            env->error_code = 0;
            break;
        /* ... */
        }
    } while (ret == 0);

    return ret;
}

int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    return -ENOSYS;
}

int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    return -ENOSYS;
}

int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    return -ENOSYS;
}

int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    return -ENOSYS;
}

/* ... */

void hvf_arch_update_guest_debug(CPUState *cpu)
{
}