Lines matching full:env (numbers are line numbers in the source file)
30 static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
33 cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
35 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
37 cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
39 cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
49 static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
51 uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
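The two lines above are the heart of svm_canonicalization: the segment base is sign-extended from the CPU's virtual address width up to bit 63. A minimal standalone sketch of the same trick (canonicalize() and the 48-bit example width are illustrative, not QEMU APIs):

#include <stdint.h>

/* Sign-extend bit (vaddr_width - 1) through bit 63; e.g. vaddr_width = 48
 * turns 0x0000800000000000 into 0xffff800000000000. */
static uint64_t canonicalize(uint64_t addr, unsigned vaddr_width)
{
    unsigned shift_amt = 64 - vaddr_width;
    return (uint64_t)((int64_t)(addr << shift_amt) >> shift_amt);
}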
55 static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
61 cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
64 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
67 cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
70 cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
74 svm_canonicalization(env, &sc->base);
77 static void svm_load_seg_cache(CPUX86State *env, int mmu_idx,
82 svm_load_seg(env, mmu_idx, addr, &sc);
83 cpu_x86_load_seg_cache(env, seg_reg, sc.selector,
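svm_save_seg and svm_load_seg copy a segment's selector, base, limit and attributes between QEMU's SegmentCache and the VMCB's vmcb_seg layout, and svm_load_seg_cache additionally installs the result in the live segment cache. The only non-obvious step, not visible in the matched lines, is converting between a descriptor-style flags word and the 12-bit VMCB attrib field; a hedged sketch of that conversion, assuming the AMD attrib layout (access byte in bits 7:0, AVL/L/D/G in bits 11:8) and a descriptor flags layout with the access byte in bits 15:8 and AVL/L/D/G in bits 23:20:

#include <stdint.h>

/* Pack descriptor flags into a VMCB-style attrib field and back.
 * Bit positions are the assumptions stated above, not taken from the listing. */
static uint16_t seg_flags_to_attrib(uint32_t flags)
{
    return (uint16_t)(((flags >> 8) & 0x00ff) |    /* type, S, DPL, P */
                      ((flags >> 12) & 0x0f00));   /* AVL, L, D/B, G  */
}

static uint32_t seg_attrib_to_flags(uint16_t attrib)
{
    return ((uint32_t)(attrib & 0x00ff) << 8) |
           ((uint32_t)(attrib & 0x0f00) << 12);
}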
87 static inline bool is_efer_invalid_state(CPUX86State *env)
89 if (!(env->efer & MSR_EFER_SVME)) {
93 if (env->efer & MSR_EFER_RESERVED) {
97 if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
98 !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
102 if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
103 && !(env->cr[4] & CR4_PAE_MASK)) {
107 if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
108 && !(env->cr[0] & CR0_PE_MASK)) {
112 if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
113 && (env->cr[4] & CR4_PAE_MASK)
114 && (env->segs[R_CS].flags & DESC_L_MASK)
115 && (env->segs[R_CS].flags & DESC_B_MASK)) {
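is_efer_invalid_state implements the architectural consistency rules that make VMRUN fail: further down, helper_vmrun turns a true result into SVM_EXIT_ERR. A self-contained restatement with the architectural bit positions written out (the EFER reserved-bits test from the listing is omitted, and the helper name and parameters are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define EFER_SVME (1u << 12)
#define EFER_LME  (1u << 8)
#define EFER_LMA  (1u << 10)
#define CR0_PE    (1u << 0)
#define CR0_PG    (1u << 31)
#define CR4_PAE   (1u << 5)

static bool efer_state_invalid(uint64_t efer, uint64_t cr0, uint64_t cr4,
                               bool cpu_has_lm, bool cs_long, bool cs_db)
{
    if (!(efer & EFER_SVME)) {
        return true;                 /* VMRUN is only legal with EFER.SVME set */
    }
    if ((efer & (EFER_LMA | EFER_LME)) && !cpu_has_lm) {
        return true;                 /* long-mode bits on a CPU without LM support */
    }
    if ((efer & EFER_LME) && (cr0 & CR0_PG) && !(cr4 & CR4_PAE)) {
        return true;                 /* long mode + paging requires PAE */
    }
    if ((efer & EFER_LME) && (cr0 & CR0_PG) && !(cr0 & CR0_PE)) {
        return true;                 /* paging without protected mode */
    }
    if ((efer & EFER_LME) && (cr0 & CR0_PG) && (cr4 & CR4_PAE)
        && cs_long && cs_db) {
        return true;                 /* CS.L and CS.D must not both be set */
    }
    return false;
}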
122 static inline bool virtual_gif_enabled(CPUX86State *env)
124 if (likely(env->hflags & HF_GUEST_MASK)) {
125 return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
126 && (env->int_ctl & V_GIF_ENABLED_MASK);
131 static inline bool virtual_vm_load_save_enabled(CPUX86State *env, uint32_t exit_code, uintptr_t retaddr)
135 if (likely(env->hflags & HF_GUEST_MASK)) {
136 if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) {
137 cpu_vmexit(env, exit_code, 0, retaddr);
140 lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
142 return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
150 static inline bool virtual_gif_set(CPUX86State *env)
152 return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
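virtual_gif_enabled, virtual_vm_load_save_enabled and virtual_gif_set decide whether the guest's GIF lives in the real hflags2 bit or is virtualized through the V_GIF bit in int_ctl (helper_stgi and helper_clgi further down toggle one or the other accordingly). A boolean sketch of the GIF decision, with illustrative parameter names in place of QEMU's bit fields:

#include <stdbool.h>

/* Returns whether the (possibly virtualized) global interrupt flag is set.
 * When VGIF is not in use, the virtual GIF is treated as always set and the
 * real GIF in hflags2 governs interrupt delivery instead. */
static bool gif_is_set(bool in_guest, bool cpu_has_vgif,
                       bool vmcb_vgif_enabled, bool v_gif_bit)
{
    bool vgif_enabled = in_guest && cpu_has_vgif && vmcb_vgif_enabled;
    return !vgif_enabled || v_gif_bit;
}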
155 void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
157 CPUState *cs = env_cpu(env);
158 X86CPU *cpu = env_archcpu(env);
170 addr = env->regs[R_EAX];
172 addr = (uint32_t)env->regs[R_EAX];
176 if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
177 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
180 cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());
184 env->vm_vmcb = addr;
187 x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
188 env->gdt.base);
189 x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
190 env->gdt.limit);
192 x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
193 env->idt.base);
194 x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
195 env->idt.limit);
198 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
200 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
202 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
204 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
206 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
208 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
211 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
213 env->vm_hsave + offsetof(struct vmcb, save.rflags),
214 cpu_compute_eflags(env));
216 svm_save_seg(env, MMU_PHYS_IDX,
217 env->vm_hsave + offsetof(struct vmcb, save.es),
218 &env->segs[R_ES]);
219 svm_save_seg(env, MMU_PHYS_IDX,
220 env->vm_hsave + offsetof(struct vmcb, save.cs),
221 &env->segs[R_CS]);
222 svm_save_seg(env, MMU_PHYS_IDX,
223 env->vm_hsave + offsetof(struct vmcb, save.ss),
224 &env->segs[R_SS]);
225 svm_save_seg(env, MMU_PHYS_IDX,
226 env->vm_hsave + offsetof(struct vmcb, save.ds),
227 &env->segs[R_DS]);
229 x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
230 env->eip + next_eip_addend);
232 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
234 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
238 env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
240 env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
243 env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
246 env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
249 env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
252 env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
257 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
258 if (x86_ldl_phys(cs, env->vm_vmcb +
261 env->hflags |= HF_INHIBIT_IRQ_MASK;
264 nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
266 asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
269 uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
272 uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
276 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
280 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
283 env->nested_pg_mode = 0;
285 if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
286 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
289 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
293 env->nested_cr3 = x86_ldq_phys(cs,
294 env->vm_vmcb + offsetof(struct vmcb,
296 env->hflags2 |= HF2_NPT_MASK;
298 env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
304 env->hflags |= HF_GUEST_MASK;
306 env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
309 new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
311 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
314 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
316 new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
317 if ((env->efer & MSR_EFER_LMA) &&
319 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
321 new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
322 if (new_cr4 & cr4_reserved_bits(env)) {
323 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
327 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
329 cpu_x86_update_cr0(env, new_cr0);
330 cpu_x86_update_cr4(env, new_cr4);
331 cpu_x86_update_cr3(env, new_cr3);
332 env->cr[2] = x86_ldq_phys(cs,
333 env->vm_vmcb + offsetof(struct vmcb, save.cr2));
334 env->int_ctl = x86_ldl_phys(cs,
335 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
336 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
337 if (env->int_ctl & V_INTR_MASKING_MASK) {
338 env->hflags2 |= HF2_VINTR_MASK;
339 if (env->eflags & IF_MASK) {
340 env->hflags2 |= HF2_HIF_MASK;
344 cpu_load_efer(env,
346 env->vm_vmcb + offsetof(struct vmcb, save.efer)));
347 env->eflags = 0;
348 cpu_load_eflags(env, x86_ldq_phys(cs,
349 env->vm_vmcb + offsetof(struct vmcb,
353 svm_load_seg_cache(env, MMU_PHYS_IDX,
354 env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES);
355 svm_load_seg_cache(env, MMU_PHYS_IDX,
356 env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS);
357 svm_load_seg_cache(env, MMU_PHYS_IDX,
358 env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS);
359 svm_load_seg_cache(env, MMU_PHYS_IDX,
360 env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS);
361 svm_load_seg(env, MMU_PHYS_IDX,
362 env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt);
363 svm_load_seg(env, MMU_PHYS_IDX,
364 env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt);
366 env->eip = x86_ldq_phys(cs,
367 env->vm_vmcb + offsetof(struct vmcb, save.rip));
369 env->regs[R_ESP] = x86_ldq_phys(cs,
370 env->vm_vmcb + offsetof(struct vmcb, save.rsp));
371 env->regs[R_EAX] = x86_ldq_phys(cs,
372 env->vm_vmcb + offsetof(struct vmcb, save.rax));
374 new_dr7 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr7));
375 new_dr6 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr6));
379 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
382 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
386 cpu_x86_update_dr7(env, new_dr7);
387 env->dr[6] = new_dr6;
389 if (is_efer_invalid_state(env)) {
390 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
394 env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
403 env->hflags2 |= HF2_GIF_MASK;
405 if (ctl_has_irq(env)) {
409 if (virtual_gif_set(env)) {
410 env->hflags2 |= HF2_VGIF_MASK;
414 event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
419 uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
428 env->error_code = event_inj_err;
429 env->exception_is_int = 0;
430 env->exception_next_eip = -1;
433 do_interrupt_x86_hardirq(env, vector, 1);
437 env->error_code = event_inj_err;
438 env->exception_is_int = 0;
439 env->exception_next_eip = env->eip;
445 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
448 env->error_code = event_inj_err;
449 env->exception_is_int = 0;
450 env->exception_next_eip = -1;
456 env->error_code = event_inj_err;
457 env->exception_is_int = 1;
458 env->exception_next_eip = env->eip;
463 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
467 env->error_code);
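The final stretch of helper_vmrun re-injects any event described by the VMCB's control.event_inj field before entering the guest. A sketch of decoding that field using the architectural EVENTINJ layout (vector in bits 7:0, type in bits 10:8, error-code-valid in bit 11, valid in bit 31); the example value is a hypothetical page-fault injection:

#include <stdint.h>
#include <stdio.h>

#define EVTINJ_VALID     (1u << 31)
#define EVTINJ_ERR_VALID (1u << 11)

int main(void)
{
    uint32_t event_inj = 0x80000b0e;   /* valid | exception type | error code | vector 14 (#PF) */

    if (event_inj & EVTINJ_VALID) {
        unsigned vector = event_inj & 0xff;
        unsigned type   = (event_inj >> 8) & 7;   /* 0=INTR, 2=NMI, 3=exception, 4=soft INT */
        int has_err     = !!(event_inj & EVTINJ_ERR_VALID);

        printf("inject vector %u, type %u, error code %s\n",
               vector, type, has_err ? "present" : "absent");
    }
    return 0;
}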
471 void helper_vmmcall(CPUX86State *env)
473 cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
474 raise_exception(env, EXCP06_ILLOP);
477 void helper_vmload(CPUX86State *env, int aflag)
483 addr = env->regs[R_EAX];
485 addr = (uint32_t)env->regs[R_EAX];
489 if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
490 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
493 cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());
495 if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
499 svm_load_seg_cache(env, mmu_idx,
501 svm_load_seg_cache(env, mmu_idx,
503 svm_load_seg(env, mmu_idx,
504 addr + offsetof(struct vmcb, save.tr), &env->tr);
505 svm_load_seg(env, mmu_idx,
506 addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
509 env->kernelgsbase =
510 cpu_ldq_mmuidx_ra(env,
513 env->lstar =
514 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
516 env->cstar =
517 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
519 env->fmask =
520 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
522 svm_canonicalization(env, &env->kernelgsbase);
524 env->star =
525 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
527 env->sysenter_cs =
528 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
530 env->sysenter_esp =
531 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
533 env->sysenter_eip =
534 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
538 void helper_vmsave(CPUX86State *env, int aflag)
544 addr = env->regs[R_EAX];
546 addr = (uint32_t)env->regs[R_EAX];
550 if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
551 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
554 cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());
556 if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
560 svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.fs),
561 &env->segs[R_FS]);
562 svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.gs),
563 &env->segs[R_GS]);
564 svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr),
565 &env->tr);
566 svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.ldtr),
567 &env->ldt);
570 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.kernel_gs_base),
571 env->kernelgsbase, mmu_idx, 0);
572 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
573 env->lstar, mmu_idx, 0);
574 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
575 env->cstar, mmu_idx, 0);
576 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
577 env->fmask, mmu_idx, 0);
579 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
580 env->star, mmu_idx, 0);
581 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
582 env->sysenter_cs, mmu_idx, 0);
583 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
584 env->sysenter_esp, mmu_idx, 0);
585 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
586 env->sysenter_eip, mmu_idx, 0);
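helper_vmload and helper_vmsave are mirror images: they walk the same subset of the VMCB save area, one loading it into env and the other storing it back, switching the MMU index when the virtualized form of the instruction applies. A rough inventory of the state involved, written as a plain struct (the layout is illustrative, not the real struct vmcb):

#include <stdint.h>

struct seg_state {                     /* selector plus hidden part, per vmcb_seg */
    uint16_t selector;
    uint16_t attrib;
    uint32_t limit;
    uint64_t base;
};

struct vmload_vmsave_state {
    struct seg_state fs, gs, tr, ldtr;                 /* segment state        */
    uint64_t kernel_gs_base;                           /* KernelGSBase MSR     */
    uint64_t star, lstar, cstar, sfmask;               /* SYSCALL MSRs         */
    uint64_t sysenter_cs, sysenter_esp, sysenter_eip;  /* SYSENTER MSRs        */
};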
589 void helper_stgi(CPUX86State *env)
591 cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
593 if (virtual_gif_enabled(env)) {
594 env->int_ctl |= V_GIF_MASK;
595 env->hflags2 |= HF2_VGIF_MASK;
597 env->hflags2 |= HF2_GIF_MASK;
601 void helper_clgi(CPUX86State *env)
603 cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
605 if (virtual_gif_enabled(env)) {
606 env->int_ctl &= ~V_GIF_MASK;
607 env->hflags2 &= ~HF2_VGIF_MASK;
609 env->hflags2 &= ~HF2_GIF_MASK;
613 bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
617 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
622 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
627 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
632 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
637 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
642 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
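cpu_svm_has_intercept dispatches on the exit code to pick the right intercept bitmap: CR reads/writes, DR reads/writes, exceptions, and everything from INTR upward in the 64-bit general intercept word. A sketch of the same mapping with the exit-code bases written as literals (0x00/0x10/0x20/0x30/0x40/0x60, assumed from the AMD exit-code numbering rather than taken from the listing):

#include <stdbool.h>
#include <stdint.h>

static bool has_intercept(uint16_t cr_read, uint16_t cr_write,
                          uint16_t dr_read, uint16_t dr_write,
                          uint32_t exceptions, uint64_t intercept,
                          uint32_t type)
{
    if (type < 0x10) {                       /* READ_CR0  .. READ_CR15    */
        return cr_read & (1u << type);
    } else if (type < 0x20) {                /* WRITE_CR0 .. WRITE_CR15   */
        return cr_write & (1u << (type - 0x10));
    } else if (type < 0x30) {                /* READ_DR0  .. READ_DR15    */
        return dr_read & (1u << (type - 0x20));
    } else if (type < 0x40) {                /* WRITE_DR0 .. WRITE_DR15   */
        return dr_write & (1u << (type - 0x30));
    } else if (type < 0x60) {                /* EXCP_BASE .. EXCP_BASE+31 */
        return exceptions & (1u << (type - 0x40));
    } else {                                 /* INTR and later            */
        return intercept & (1ULL << (type - 0x60));
    }
}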
650 void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
653 CPUState *cs = env_cpu(env);
655 if (likely(!(env->hflags & HF_GUEST_MASK))) {
659 if (!cpu_svm_has_intercept(env, type)) {
665 uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
670 switch ((uint32_t)env->regs[R_ECX]) {
672 t0 = (env->regs[R_ECX] * 2) % 8;
673 t1 = (env->regs[R_ECX] * 2) / 8;
676 t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
681 t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
686 cpu_vmexit(env, type, param, retaddr);
692 cpu_vmexit(env, type, param, retaddr);
697 cpu_vmexit(env, type, param, retaddr);
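For SVM_EXIT_MSR, cpu_svm_check_intercept_param consults the MSR permission bitmap at control.msrpm_base_pa: each MSR owns two adjacent bits (read, then write), and the three architectural MSR ranges map to consecutive 2KiB regions of the bitmap, which is what the t0/t1 arithmetic above computes. A worked standalone version of that index calculation (the function name is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Compute the byte and bit offset of an MSR's read-permission bit in the
 * MSR permission map; the write bit is the next higher bit. Returns false
 * for MSRs outside the three mapped ranges (those always intercept). */
static bool msrpm_bit_offset(uint32_t msr, uint32_t *byte, uint32_t *bit)
{
    uint32_t idx;

    if (msr <= 0x1fff) {
        idx = msr * 2;                          /* region 0: bytes 0..2047    */
    } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
        idx = (8192 + msr - 0xc0000000) * 2;    /* region 1: bytes 2048..4095 */
    } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
        idx = (16384 + msr - 0xc0010000) * 2;   /* region 2: bytes 4096..6143 */
    } else {
        return false;
    }
    *byte = idx / 8;
    *bit  = idx % 8;
    return true;
}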
700 void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
702 cpu_svm_check_intercept_param(env, type, 0, GETPC());
705 void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
708 CPUState *cs = env_cpu(env);
710 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
712 uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
717 /* next env->eip */
719 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
720 env->eip + next_eip_addend);
721 cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
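helper_svm_check_io consults the I/O permission bitmap at control.iopm_base_pa: one bit per port, and an access spanning several consecutive ports must have all of its bits clear to avoid a VMEXIT, with exit_info_2 recording the next RIP. A sketch of the bitmap test itself, taking the access size in bytes directly rather than decoding it from the IOIO exit-information word:

#include <stdbool.h>
#include <stdint.h>

/* iopm must cover at least port/8 + 2 bytes (the architectural map is 12KiB);
 * a 16-bit little-endian window is read so the test also works when the
 * covered bits straddle a byte boundary. */
static bool iopm_intercepted(const uint8_t *iopm, uint16_t port, unsigned size)
{
    uint16_t mask = (uint16_t)((1u << size) - 1);   /* one bit per accessed byte */
    uint16_t window = (uint16_t)(iopm[port / 8] | (iopm[port / 8 + 1] << 8));

    return (window & (mask << (port % 8))) != 0;
}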
726 void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
729 CPUState *cs = env_cpu(env);
736 x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
738 env->eip);
741 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
744 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
748 env->old_exception = -1;
752 void do_vmexit(CPUX86State *env)
754 CPUState *cs = env_cpu(env);
756 if (env->hflags & HF_INHIBIT_IRQ_MASK) {
758 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
760 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
763 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
765 env->hflags2 &= ~HF2_NPT_MASK;
769 svm_save_seg(env, MMU_PHYS_IDX,
770 env->vm_vmcb + offsetof(struct vmcb, save.es),
771 &env->segs[R_ES]);
772 svm_save_seg(env, MMU_PHYS_IDX,
773 env->vm_vmcb + offsetof(struct vmcb, save.cs),
774 &env->segs[R_CS]);
775 svm_save_seg(env, MMU_PHYS_IDX,
776 env->vm_vmcb + offsetof(struct vmcb, save.ss),
777 &env->segs[R_SS]);
778 svm_save_seg(env, MMU_PHYS_IDX,
779 env->vm_vmcb + offsetof(struct vmcb, save.ds),
780 &env->segs[R_DS]);
782 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
783 env->gdt.base);
784 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
785 env->gdt.limit);
787 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
788 env->idt.base);
789 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
790 env->idt.limit);
793 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
795 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
797 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
799 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
801 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
803 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);
805 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
806 cpu_compute_eflags(env));
807 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
808 env->eip);
810 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
812 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
814 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
816 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
817 x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
818 env->hflags & HF_CPL_MASK);
821 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
822 env->hflags &= ~HF_GUEST_MASK;
823 env->intercept = 0;
824 env->intercept_exceptions = 0;
828 env->int_ctl = 0;
831 env->tsc_offset = 0;
833 env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
835 env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
838 env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
840 env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
843 cpu_x86_update_cr0(env, x86_ldq_phys(cs,
844 env->vm_hsave + offsetof(struct vmcb,
847 cpu_x86_update_cr4(env, x86_ldq_phys(cs,
848 env->vm_hsave + offsetof(struct vmcb,
859 cpu_x86_update_cr3(env, x86_ldq_phys(cs,
860 env->vm_hsave + offsetof(struct vmcb,
864 cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
868 env->eflags = 0;
869 cpu_load_eflags(env, x86_ldq_phys(cs,
870 env->vm_hsave + offsetof(struct vmcb,
875 svm_load_seg_cache(env, MMU_PHYS_IDX,
876 env->vm_hsave + offsetof(struct vmcb, save.es), R_ES);
877 svm_load_seg_cache(env, MMU_PHYS_IDX,
878 env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS);
879 svm_load_seg_cache(env, MMU_PHYS_IDX,
880 env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS);
881 svm_load_seg_cache(env, MMU_PHYS_IDX,
882 env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS);
884 env->eip = x86_ldq_phys(cs,
885 env->vm_hsave + offsetof(struct vmcb, save.rip));
886 env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
888 env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
891 env->dr[6] = x86_ldq_phys(cs,
892 env->vm_hsave + offsetof(struct vmcb, save.dr6));
895 cpu_x86_update_dr7(env,
897 env->vm_hsave + offsetof(struct vmcb, save.dr7)) & ~0xff);
901 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
902 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
905 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
906 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
909 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
911 env->hflags2 &= ~HF2_GIF_MASK;
912 env->hflags2 &= ~HF2_VGIF_MASK;
922 if ((env->eflags & TF_MASK) != 0) {
923 env->dr[6] |= DR6_BS;
924 do_interrupt_all(X86_CPU(cs), EXCP01_DB, 0, 0, env->eip, 0);
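The very end of do_vmexit reproduces a subtle architectural detail: if RFLAGS.TF is set in the restored host state, a single-step #DB is delivered right after #VMEXIT, with the BS bit recorded in DR6, as the last lines above show. A tiny standalone illustration with the architectural bit positions (TF is RFLAGS bit 8, BS is DR6 bit 14):

#include <stdint.h>
#include <stdio.h>

#define RFLAGS_TF (1u << 8)
#define DR6_BS    (1u << 14)

int main(void)
{
    uint64_t rflags = RFLAGS_TF;    /* example: the host was single-stepping */
    uint64_t dr6 = 0;

    if (rflags & RFLAGS_TF) {
        dr6 |= DR6_BS;
        printf("deliver #DB after #VMEXIT, DR6 = %#llx\n",
               (unsigned long long)dr6);
    }
    return 0;
}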