Lines matching "cs", "-", "0" (full-text search)

23 #include "exec/helper-proto.h"
24 #include "exec/exec-all.h"
26 #include "tcg/helper-tcg.h"
34 sc->selector, mmu_idx, 0); in svm_save_seg()
36 sc->base, mmu_idx, 0); in svm_save_seg()
38 sc->limit, mmu_idx, 0); in svm_save_seg()
40 ((sc->flags >> 8) & 0xff) in svm_save_seg()
41 | ((sc->flags >> 12) & 0x0f00), in svm_save_seg()
42 mmu_idx, 0); in svm_save_seg()
46 * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
51 uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env); in svm_canonicalization()
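
A minimal sketch of the canonicalization step implied by the comment and the shift_amt line above; canonicalize() is a hypothetical stand-alone helper, with the virtual address width passed in rather than read from the CPU state.

    #include <stdint.h>

    /* Replicate bit (width - 1) into bits 63..width by shifting the
     * address up to the top of the register and arithmetic-shifting
     * it back down. */
    static uint64_t canonicalize(uint64_t base, unsigned vaddr_width)
    {
        unsigned shift_amt = 64 - vaddr_width;   /* e.g. 16 for 48-bit */
        return (uint64_t)(((int64_t)(base << shift_amt)) >> shift_amt);
    }

For a 48-bit width, 0x0000800000000000 becomes 0xffff800000000000, while already-canonical addresses pass through unchanged.
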
60 sc->selector = in svm_load_seg()
62 mmu_idx, 0); in svm_load_seg()
63 sc->base = in svm_load_seg()
65 mmu_idx, 0); in svm_load_seg()
66 sc->limit = in svm_load_seg()
68 mmu_idx, 0); in svm_load_seg()
71 mmu_idx, 0); in svm_load_seg()
72 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12); in svm_load_seg()
74 svm_canonicalization(env, &sc->base); in svm_load_seg()
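
The attribute arithmetic in svm_save_seg() and svm_load_seg() above converts between QEMU's descriptor-flags layout (attribute bits at positions 8..15 and 20..23, as in the raw descriptor dword) and the VMCB's packed 12-bit attribute field. A minimal round-trip sketch, with hypothetical helper names:

    #include <stdint.h>

    /* Compress descriptor-flag attribute bits 8..15 and 20..23 into
     * VMCB attribute bits 0..7 and 8..11. */
    static uint16_t pack_vmcb_attrib(uint32_t flags)
    {
        return ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
    }

    /* Expand the packed VMCB attribute field back into the
     * descriptor-flags layout. */
    static uint32_t unpack_vmcb_attrib(uint16_t attrib)
    {
        return ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
    }
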
89 if (!(env->efer & MSR_EFER_SVME)) { in is_efer_invalid_state()
93 if (env->efer & MSR_EFER_RESERVED) { in is_efer_invalid_state()
97 if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) && in is_efer_invalid_state()
98 !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) { in is_efer_invalid_state()
102 if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK) in is_efer_invalid_state()
103 && !(env->cr[4] & CR4_PAE_MASK)) { in is_efer_invalid_state()
107 if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK) in is_efer_invalid_state()
108 && !(env->cr[0] & CR0_PE_MASK)) { in is_efer_invalid_state()
112 if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK) in is_efer_invalid_state()
113 && (env->cr[4] & CR4_PAE_MASK) in is_efer_invalid_state()
114 && (env->segs[R_CS].flags & DESC_L_MASK) in is_efer_invalid_state()
115 && (env->segs[R_CS].flags & DESC_B_MASK)) { in is_efer_invalid_state()
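
The is_efer_invalid_state() fragment above elides the function frame and the early returns; a consolidated sketch of the same checks, using only the mask names visible in the listing, reads:

    /* true when the guest EFER/CR0/CR4/CS state is inconsistent and
     * VMRUN must fail. */
    static bool efer_state_invalid(CPUX86State *env)
    {
        if (!(env->efer & MSR_EFER_SVME)) {
            return true;    /* VMRUN requires EFER.SVME */
        }
        if (env->efer & MSR_EFER_RESERVED) {
            return true;    /* reserved EFER bits set */
        }
        if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
            return true;    /* long mode enabled without CPUID LM */
        }
        if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK) &&
            !(env->cr[4] & CR4_PAE_MASK)) {
            return true;    /* LME with paging requires PAE */
        }
        if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK) &&
            !(env->cr[0] & CR0_PE_MASK)) {
            return true;    /* LME with paging requires protected mode */
        }
        if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK) &&
            (env->cr[4] & CR4_PAE_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK) &&
            (env->segs[R_CS].flags & DESC_B_MASK)) {
            return true;    /* CS.L and CS.D set together in long mode */
        }
        return false;
    }
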
124 if (likely(env->hflags & HF_GUEST_MASK)) { in virtual_gif_enabled()
125 return (env->features[FEAT_SVM] & CPUID_SVM_VGIF) in virtual_gif_enabled()
126 && (env->int_ctl & V_GIF_ENABLED_MASK); in virtual_gif_enabled()
135 if (likely(env->hflags & HF_GUEST_MASK)) { in virtual_vm_load_save_enabled()
136 if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) { in virtual_vm_load_save_enabled()
137 cpu_vmexit(env, exit_code, 0, retaddr); in virtual_vm_load_save_enabled()
140 lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb, in virtual_vm_load_save_enabled()
142 return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD) in virtual_vm_load_save_enabled()
152 return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK); in virtual_gif_set()
157 CPUState *cs = env_cpu(env); in helper_vmrun() local
170 addr = env->regs[R_EAX]; in helper_vmrun()
172 addr = (uint32_t)env->regs[R_EAX]; in helper_vmrun()
176 if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) { in helper_vmrun()
177 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); in helper_vmrun()
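
The operand check above (repeated for VMLOAD and VMSAVE further down) requires the physical address to be 4 KiB aligned and to fit within the CPU's physical address width. A stand-alone sketch, with a hypothetical helper name:

    #include <stdbool.h>
    #include <stdint.h>

    /* Reject unaligned or out-of-range VMCB/save-area addresses;
     * the caller raises #GP(0) on failure. */
    static bool svm_addr_valid(uint64_t addr, unsigned phys_bits)
    {
        return (addr & (0xfffULL | (~0ULL << phys_bits))) == 0;
    }
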
180 cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC()); in helper_vmrun()
184 env->vm_vmcb = addr; in helper_vmrun()
187 x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), in helper_vmrun()
188 env->gdt.base); in helper_vmrun()
189 x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), in helper_vmrun()
190 env->gdt.limit); in helper_vmrun()
192 x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base), in helper_vmrun()
193 env->idt.base); in helper_vmrun()
194 x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), in helper_vmrun()
195 env->idt.limit); in helper_vmrun()
197 x86_stq_phys(cs, in helper_vmrun()
198 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]); in helper_vmrun()
199 x86_stq_phys(cs, in helper_vmrun()
200 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]); in helper_vmrun()
201 x86_stq_phys(cs, in helper_vmrun()
202 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]); in helper_vmrun()
203 x86_stq_phys(cs, in helper_vmrun()
204 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]); in helper_vmrun()
205 x86_stq_phys(cs, in helper_vmrun()
206 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]); in helper_vmrun()
207 x86_stq_phys(cs, in helper_vmrun()
208 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]); in helper_vmrun()
210 x86_stq_phys(cs, in helper_vmrun()
211 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer); in helper_vmrun()
212 x86_stq_phys(cs, in helper_vmrun()
213 env->vm_hsave + offsetof(struct vmcb, save.rflags), in helper_vmrun()
217 env->vm_hsave + offsetof(struct vmcb, save.es), in helper_vmrun()
218 &env->segs[R_ES]); in helper_vmrun()
220 env->vm_hsave + offsetof(struct vmcb, save.cs), in helper_vmrun()
221 &env->segs[R_CS]); in helper_vmrun()
223 env->vm_hsave + offsetof(struct vmcb, save.ss), in helper_vmrun()
224 &env->segs[R_SS]); in helper_vmrun()
226 env->vm_hsave + offsetof(struct vmcb, save.ds), in helper_vmrun()
227 &env->segs[R_DS]); in helper_vmrun()
229 x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip), in helper_vmrun()
230 env->eip + next_eip_addend); in helper_vmrun()
231 x86_stq_phys(cs, in helper_vmrun()
232 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); in helper_vmrun()
233 x86_stq_phys(cs, in helper_vmrun()
234 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); in helper_vmrun()
238 env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in helper_vmrun()
240 env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb + in helper_vmrun()
243 env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb + in helper_vmrun()
246 env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb + in helper_vmrun()
249 env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb + in helper_vmrun()
252 env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb + in helper_vmrun()
257 env->hflags &= ~HF_INHIBIT_IRQ_MASK; in helper_vmrun()
258 if (x86_ldl_phys(cs, env->vm_vmcb + in helper_vmrun()
261 env->hflags |= HF_INHIBIT_IRQ_MASK; in helper_vmrun()
264 nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in helper_vmrun()
266 asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in helper_vmrun()
269 uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb + in helper_vmrun()
272 uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb + in helper_vmrun()
275 if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) { in helper_vmrun()
276 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
279 if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) { in helper_vmrun()
280 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
283 env->nested_pg_mode = 0; in helper_vmrun()
286 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
288 if (asid == 0) { in helper_vmrun()
289 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
293 env->nested_cr3 = x86_ldq_phys(cs, in helper_vmrun()
294 env->vm_vmcb + offsetof(struct vmcb, in helper_vmrun()
296 env->hflags2 |= HF2_NPT_MASK; in helper_vmrun()
298 env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK; in helper_vmrun()
300 tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX); in helper_vmrun()
304 env->hflags |= HF_GUEST_MASK; in helper_vmrun()
306 env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb + in helper_vmrun()
309 new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0)); in helper_vmrun()
311 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
314 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
316 new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3)); in helper_vmrun()
317 if ((env->efer & MSR_EFER_LMA) && in helper_vmrun()
318 (new_cr3 & ((~0ULL) << cpu->phys_bits))) { in helper_vmrun()
319 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
321 new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4)); in helper_vmrun()
323 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
326 x86_stq_phys(cs, in helper_vmrun()
327 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0); in helper_vmrun()
332 env->cr[2] = x86_ldq_phys(cs, in helper_vmrun()
333 env->vm_vmcb + offsetof(struct vmcb, save.cr2)); in helper_vmrun()
334 env->int_ctl = x86_ldl_phys(cs, in helper_vmrun()
335 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); in helper_vmrun()
336 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); in helper_vmrun()
337 if (env->int_ctl & V_INTR_MASKING_MASK) { in helper_vmrun()
338 env->hflags2 |= HF2_VINTR_MASK; in helper_vmrun()
339 if (env->eflags & IF_MASK) { in helper_vmrun()
340 env->hflags2 |= HF2_HIF_MASK; in helper_vmrun()
345 x86_ldq_phys(cs, in helper_vmrun()
346 env->vm_vmcb + offsetof(struct vmcb, save.efer))); in helper_vmrun()
347 env->eflags = 0; in helper_vmrun()
348 cpu_load_eflags(env, x86_ldq_phys(cs, in helper_vmrun()
349 env->vm_vmcb + offsetof(struct vmcb, in helper_vmrun()
354 env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES); in helper_vmrun()
356 env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS); in helper_vmrun()
358 env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS); in helper_vmrun()
360 env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS); in helper_vmrun()
362 env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt); in helper_vmrun()
364 env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt); in helper_vmrun()
366 env->eip = x86_ldq_phys(cs, in helper_vmrun()
367 env->vm_vmcb + offsetof(struct vmcb, save.rip)); in helper_vmrun()
369 env->regs[R_ESP] = x86_ldq_phys(cs, in helper_vmrun()
370 env->vm_vmcb + offsetof(struct vmcb, save.rsp)); in helper_vmrun()
371 env->regs[R_EAX] = x86_ldq_phys(cs, in helper_vmrun()
372 env->vm_vmcb + offsetof(struct vmcb, save.rax)); in helper_vmrun()
374 new_dr7 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr7)); in helper_vmrun()
375 new_dr6 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr6)); in helper_vmrun()
379 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
382 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
387 env->dr[6] = new_dr6; in helper_vmrun()
390 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
393 switch (x86_ldub_phys(cs, in helper_vmrun()
394 env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) { in helper_vmrun()
399 tlb_flush(cs); in helper_vmrun()
403 env->hflags2 |= HF2_GIF_MASK; in helper_vmrun()
406 cs->interrupt_request |= CPU_INTERRUPT_VIRQ; in helper_vmrun()
410 env->hflags2 |= HF2_VGIF_MASK; in helper_vmrun()
414 event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in helper_vmrun()
419 uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb + in helper_vmrun()
427 cs->exception_index = vector; in helper_vmrun()
428 env->error_code = event_inj_err; in helper_vmrun()
429 env->exception_is_int = 0; in helper_vmrun()
430 env->exception_next_eip = -1; in helper_vmrun()
436 cs->exception_index = EXCP02_NMI; in helper_vmrun()
437 env->error_code = event_inj_err; in helper_vmrun()
438 env->exception_is_int = 0; in helper_vmrun()
439 env->exception_next_eip = env->eip; in helper_vmrun()
441 cpu_loop_exit(cs); in helper_vmrun()
445 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
447 cs->exception_index = vector; in helper_vmrun()
448 env->error_code = event_inj_err; in helper_vmrun()
449 env->exception_is_int = 0; in helper_vmrun()
450 env->exception_next_eip = -1; in helper_vmrun()
452 cpu_loop_exit(cs); in helper_vmrun()
455 cs->exception_index = vector; in helper_vmrun()
456 env->error_code = event_inj_err; in helper_vmrun()
457 env->exception_is_int = 1; in helper_vmrun()
458 env->exception_next_eip = env->eip; in helper_vmrun()
460 cpu_loop_exit(cs); in helper_vmrun()
463 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
466 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index, in helper_vmrun()
467 env->error_code); in helper_vmrun()
473 cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC()); in helper_vmmcall()
483 addr = env->regs[R_EAX]; in helper_vmload()
485 addr = (uint32_t)env->regs[R_EAX]; in helper_vmload()
489 if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) { in helper_vmload()
490 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); in helper_vmload()
493 cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC()); in helper_vmload()
504 addr + offsetof(struct vmcb, save.tr), &env->tr); in helper_vmload()
506 addr + offsetof(struct vmcb, save.ldtr), &env->ldt); in helper_vmload()
509 env->kernelgsbase = in helper_vmload()
512 mmu_idx, 0); in helper_vmload()
513 env->lstar = in helper_vmload()
515 mmu_idx, 0); in helper_vmload()
516 env->cstar = in helper_vmload()
518 mmu_idx, 0); in helper_vmload()
519 env->fmask = in helper_vmload()
521 mmu_idx, 0); in helper_vmload()
522 svm_canonicalization(env, &env->kernelgsbase); in helper_vmload()
524 env->star = in helper_vmload()
526 mmu_idx, 0); in helper_vmload()
527 env->sysenter_cs = in helper_vmload()
529 mmu_idx, 0); in helper_vmload()
530 env->sysenter_esp = in helper_vmload()
532 mmu_idx, 0); in helper_vmload()
533 env->sysenter_eip = in helper_vmload()
535 mmu_idx, 0); in helper_vmload()
544 addr = env->regs[R_EAX]; in helper_vmsave()
546 addr = (uint32_t)env->regs[R_EAX]; in helper_vmsave()
550 if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) { in helper_vmsave()
551 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); in helper_vmsave()
554 cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC()); in helper_vmsave()
561 &env->segs[R_FS]); in helper_vmsave()
563 &env->segs[R_GS]); in helper_vmsave()
565 &env->tr); in helper_vmsave()
567 &env->ldt); in helper_vmsave()
571 env->kernelgsbase, mmu_idx, 0); in helper_vmsave()
573 env->lstar, mmu_idx, 0); in helper_vmsave()
575 env->cstar, mmu_idx, 0); in helper_vmsave()
577 env->fmask, mmu_idx, 0); in helper_vmsave()
580 env->star, mmu_idx, 0); in helper_vmsave()
582 env->sysenter_cs, mmu_idx, 0); in helper_vmsave()
584 env->sysenter_esp, mmu_idx, 0); in helper_vmsave()
586 env->sysenter_eip, mmu_idx, 0); in helper_vmsave()
591 cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC()); in helper_stgi()
594 env->int_ctl |= V_GIF_MASK; in helper_stgi()
595 env->hflags2 |= HF2_VGIF_MASK; in helper_stgi()
597 env->hflags2 |= HF2_GIF_MASK; in helper_stgi()
603 cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC()); in helper_clgi()
606 env->int_ctl &= ~V_GIF_MASK; in helper_clgi()
607 env->hflags2 &= ~HF2_VGIF_MASK; in helper_clgi()
609 env->hflags2 &= ~HF2_GIF_MASK; in helper_clgi()
617 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) { in cpu_svm_has_intercept()
622 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) { in cpu_svm_has_intercept()
627 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) { in cpu_svm_has_intercept()
632 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) { in cpu_svm_has_intercept()
637 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) { in cpu_svm_has_intercept()
642 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) { in cpu_svm_has_intercept()
653 CPUState *cs = env_cpu(env); in cpu_svm_check_intercept_param() local
655 if (likely(!(env->hflags & HF_GUEST_MASK))) { in cpu_svm_check_intercept_param()
665 uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb + in cpu_svm_check_intercept_param()
670 switch ((uint32_t)env->regs[R_ECX]) { in cpu_svm_check_intercept_param()
671 case 0 ... 0x1fff: in cpu_svm_check_intercept_param()
672 t0 = (env->regs[R_ECX] * 2) % 8; in cpu_svm_check_intercept_param()
673 t1 = (env->regs[R_ECX] * 2) / 8; in cpu_svm_check_intercept_param()
675 case 0xc0000000 ... 0xc0001fff: in cpu_svm_check_intercept_param()
676 t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2; in cpu_svm_check_intercept_param()
680 case 0xc0010000 ... 0xc0011fff: in cpu_svm_check_intercept_param()
681 t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2; in cpu_svm_check_intercept_param()
687 t0 = 0; in cpu_svm_check_intercept_param()
688 t1 = 0; in cpu_svm_check_intercept_param()
691 if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) { in cpu_svm_check_intercept_param()
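
The t0/t1 arithmetic above indexes the MSR permission bitmap: each MSR owns two adjacent bits (read intercept, then write intercept), and the three architectural MSR ranges map to consecutive 2K regions of the map. A sketch, where msrpm_intercepted() and is_write are hypothetical stand-ins for the physical-memory read and the param bit:

    #include <stdbool.h>
    #include <stdint.h>

    static bool msrpm_intercepted(const uint8_t *msrpm, uint32_t msr,
                                  bool is_write)
    {
        uint32_t bitpos;

        if (msr <= 0x1fff) {
            bitpos = msr * 2;
        } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
            bitpos = (8192 + (msr - 0xc0000000)) * 2;
        } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
            bitpos = (16384 + (msr - 0xc0010000)) * 2;
        } else {
            /* Outside the mapped ranges: treat as not intercepted
             * (the fragment above zeroes t0/t1 in its default case). */
            return false;
        }
        return msrpm[bitpos / 8] & (1u << ((bitpos % 8) + is_write));
    }
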
702 cpu_svm_check_intercept_param(env, type, 0, GETPC()); in helper_svm_check_intercept()
708 CPUState *cs = env_cpu(env); in helper_svm_check_io() local
710 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) { in helper_svm_check_io()
712 uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb + in helper_svm_check_io()
714 uint16_t mask = (1 << ((param >> 4) & 7)) - 1; in helper_svm_check_io()
716 if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) { in helper_svm_check_io()
717 /* next env->eip */ in helper_svm_check_io()
718 x86_stq_phys(cs, in helper_svm_check_io()
719 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), in helper_svm_check_io()
720 env->eip + next_eip_addend); in helper_svm_check_io()
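
The I/O permission bitmap test above uses one bit per port; bits 4..6 of the IOIO exit information give the access width, and the derived mask widens the test to cover every byte of a multi-byte access. A sketch with size expressed in bytes (1, 2 or 4, matching that field's one-hot encoding):

    #include <stdbool.h>
    #include <stdint.h>

    static bool iopm_intercepted(const uint8_t *iopm, uint16_t port,
                                 unsigned size)
    {
        uint16_t mask = (1u << size) - 1;   /* 0x1, 0x3 or 0xf */

        /* 16-bit little-endian load so a mask shifted past bit 7 still
         * lands inside the loaded window (mirrors x86_lduw_phys). */
        uint16_t bits = iopm[port / 8] | ((uint16_t)iopm[port / 8 + 1] << 8);

        return (bits & (mask << (port & 7))) != 0;
    }
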
729 CPUState *cs = env_cpu(env); in cpu_vmexit() local
731 cpu_restore_state(cs, retaddr); in cpu_vmexit()
736 x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in cpu_vmexit()
738 env->eip); in cpu_vmexit()
740 cs->exception_index = EXCP_VMEXIT; in cpu_vmexit()
741 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code), in cpu_vmexit()
744 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in cpu_vmexit()
748 env->old_exception = -1; in cpu_vmexit()
749 cpu_loop_exit(cs); in cpu_vmexit()
754 CPUState *cs = env_cpu(env); in do_vmexit() local
756 if (env->hflags & HF_INHIBIT_IRQ_MASK) { in do_vmexit()
757 x86_stl_phys(cs, in do_vmexit()
758 env->vm_vmcb + offsetof(struct vmcb, control.int_state), in do_vmexit()
760 env->hflags &= ~HF_INHIBIT_IRQ_MASK; in do_vmexit()
762 x86_stl_phys(cs, in do_vmexit()
763 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0); in do_vmexit()
765 env->hflags2 &= ~HF2_NPT_MASK; in do_vmexit()
766 tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX); in do_vmexit()
770 env->vm_vmcb + offsetof(struct vmcb, save.es), in do_vmexit()
771 &env->segs[R_ES]); in do_vmexit()
773 env->vm_vmcb + offsetof(struct vmcb, save.cs), in do_vmexit()
774 &env->segs[R_CS]); in do_vmexit()
776 env->vm_vmcb + offsetof(struct vmcb, save.ss), in do_vmexit()
777 &env->segs[R_SS]); in do_vmexit()
779 env->vm_vmcb + offsetof(struct vmcb, save.ds), in do_vmexit()
780 &env->segs[R_DS]); in do_vmexit()
782 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), in do_vmexit()
783 env->gdt.base); in do_vmexit()
784 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), in do_vmexit()
785 env->gdt.limit); in do_vmexit()
787 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), in do_vmexit()
788 env->idt.base); in do_vmexit()
789 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), in do_vmexit()
790 env->idt.limit); in do_vmexit()
792 x86_stq_phys(cs, in do_vmexit()
793 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer); in do_vmexit()
794 x86_stq_phys(cs, in do_vmexit()
795 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]); in do_vmexit()
796 x86_stq_phys(cs, in do_vmexit()
797 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]); in do_vmexit()
798 x86_stq_phys(cs, in do_vmexit()
799 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]); in do_vmexit()
800 x86_stq_phys(cs, in do_vmexit()
801 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]); in do_vmexit()
802 x86_stl_phys(cs, in do_vmexit()
803 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl); in do_vmexit()
805 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags), in do_vmexit()
807 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip), in do_vmexit()
808 env->eip); in do_vmexit()
809 x86_stq_phys(cs, in do_vmexit()
810 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); in do_vmexit()
811 x86_stq_phys(cs, in do_vmexit()
812 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); in do_vmexit()
813 x86_stq_phys(cs, in do_vmexit()
814 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]); in do_vmexit()
815 x86_stq_phys(cs, in do_vmexit()
816 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]); in do_vmexit()
817 x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl), in do_vmexit()
818 env->hflags & HF_CPL_MASK); in do_vmexit()
821 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); in do_vmexit()
822 env->hflags &= ~HF_GUEST_MASK; in do_vmexit()
823 env->intercept = 0; in do_vmexit()
824 env->intercept_exceptions = 0; in do_vmexit()
827 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; in do_vmexit()
828 env->int_ctl = 0; in do_vmexit()
831 env->tsc_offset = 0; in do_vmexit()
833 env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
835 env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
838 env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
840 env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
843 cpu_x86_update_cr0(env, x86_ldq_phys(cs, in do_vmexit()
844 env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
847 cpu_x86_update_cr4(env, x86_ldq_phys(cs, in do_vmexit()
848 env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
859 cpu_x86_update_cr3(env, x86_ldq_phys(cs, in do_vmexit()
860 env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
864 cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
868 env->eflags = 0; in do_vmexit()
869 cpu_load_eflags(env, x86_ldq_phys(cs, in do_vmexit()
870 env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
876 env->vm_hsave + offsetof(struct vmcb, save.es), R_ES); in do_vmexit()
878 env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS); in do_vmexit()
880 env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS); in do_vmexit()
882 env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS); in do_vmexit()
884 env->eip = x86_ldq_phys(cs, in do_vmexit()
885 env->vm_hsave + offsetof(struct vmcb, save.rip)); in do_vmexit()
886 env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave + in do_vmexit()
888 env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave + in do_vmexit()
891 env->dr[6] = x86_ldq_phys(cs, in do_vmexit()
892 env->vm_hsave + offsetof(struct vmcb, save.dr6)); in do_vmexit()
896 x86_ldq_phys(cs, in do_vmexit()
897 env->vm_hsave + offsetof(struct vmcb, save.dr7)) & ~0xff); in do_vmexit()
900 x86_stl_phys(cs, in do_vmexit()
901 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info), in do_vmexit()
902 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in do_vmexit()
904 x86_stl_phys(cs, in do_vmexit()
905 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err), in do_vmexit()
906 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in do_vmexit()
908 x86_stl_phys(cs, in do_vmexit()
909 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0); in do_vmexit()
911 env->hflags2 &= ~HF2_GIF_MASK; in do_vmexit()
912 env->hflags2 &= ~HF2_VGIF_MASK; in do_vmexit()
922 if ((env->eflags & TF_MASK) != 0) { in do_vmexit()
923 env->dr[6] |= DR6_BS; in do_vmexit()
924 do_interrupt_all(X86_CPU(cs), EXCP01_DB, 0, 0, env->eip, 0); in do_vmexit()