Lines matching full:env
30 static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr, in svm_save_seg() argument
33 cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector), in svm_save_seg()
35 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base), in svm_save_seg()
37 cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit), in svm_save_seg()
39 cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib), in svm_save_seg()
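
The four stores above write one segment register into the guest-physical VMCB. A minimal sketch of the record they populate, assuming the usual VMCB segment layout (a 16-bit selector, 16 bits of packed attributes, a 32-bit limit and a 64-bit base; QEMU's own definition lives in its svm.h):

    #include <stdint.h>

    /* Illustrative VMCB segment entry matching the 16/16/32/64-bit
     * store widths used by svm_save_seg() above. */
    struct vmcb_seg {
        uint16_t selector;
        uint16_t attrib;
        uint32_t limit;
        uint64_t base;
    } __attribute__((packed));
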
49 static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base) in svm_canonicalization() argument
51 uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env); in svm_canonicalization()
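
svm_canonicalization() sign-extends a loaded segment base to the CPU's virtual address width, so that non-canonical values in the VMCB cannot leak into the segment cache. A standalone sketch of the arithmetic implied by shift_amt above (a vaddr_width of 48 or 57 bits is an assumption about typical configurations):

    #include <stdint.h>

    /* Sign-extend 'base' from 'vaddr_width' bits to a canonical 64-bit
     * address by shifting the top bits out and back in arithmetically. */
    static uint64_t canonicalize_base(uint64_t base, unsigned vaddr_width)
    {
        unsigned shift_amt = 64 - vaddr_width;
        return (uint64_t)(((int64_t)(base << shift_amt)) >> shift_amt);
    }
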
55 static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr, in svm_load_seg() argument
61 cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector), in svm_load_seg()
64 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base), in svm_load_seg()
67 cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit), in svm_load_seg()
70 cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib), in svm_load_seg()
74 svm_canonicalization(env, &sc->base); in svm_load_seg()
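
The attrib word loaded above is AMD's compressed attribute encoding: the descriptor access-rights byte in bits 0-7 and the AVL/L/D/G nibble in bits 8-11. A hedged sketch of expanding it back into descriptor-style flag positions such as QEMU caches; the lines elided by the search presumably perform an equivalent conversion before the canonicalization call:

    #include <stdint.h>

    /* Move the VMCB attrib bits back to their x86 descriptor positions:
     * type/S/DPL/P into bits 8..15, AVL/L/D-B/G into bits 20..23. */
    static uint32_t vmcb_attrib_to_desc_flags(uint16_t attrib)
    {
        return ((uint32_t)(attrib & 0x00ff) << 8) |
               ((uint32_t)(attrib & 0x0f00) << 12);
    }
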
77 static void svm_load_seg_cache(CPUX86State *env, int mmu_idx, in svm_load_seg_cache() argument
82 svm_load_seg(env, mmu_idx, addr, &sc); in svm_load_seg_cache()
83 cpu_x86_load_seg_cache(env, seg_reg, sc.selector, in svm_load_seg_cache()
87 static inline bool is_efer_invalid_state (CPUX86State *env) in is_efer_invalid_state() argument
89 if (!(env->efer & MSR_EFER_SVME)) { in is_efer_invalid_state()
93 if (env->efer & MSR_EFER_RESERVED) { in is_efer_invalid_state()
97 if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) && in is_efer_invalid_state()
98 !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) { in is_efer_invalid_state()
102 if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK) in is_efer_invalid_state()
103 && !(env->cr[4] & CR4_PAE_MASK)) { in is_efer_invalid_state()
107 if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK) in is_efer_invalid_state()
108 && !(env->cr[0] & CR0_PE_MASK)) { in is_efer_invalid_state()
112 if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK) in is_efer_invalid_state()
113 && (env->cr[4] & CR4_PAE_MASK) in is_efer_invalid_state()
114 && (env->segs[R_CS].flags & DESC_L_MASK) in is_efer_invalid_state()
115 && (env->segs[R_CS].flags & DESC_B_MASK)) { in is_efer_invalid_state()
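
These conditions are the EFER consistency checks that VMRUN must perform on the incoming guest state; if any of them holds, helper_vmrun() later bails out with SVM_EXIT_ERR. A condensed standalone sketch of the same predicate, with architectural bit positions spelled out as assumptions in place of QEMU's mask macros:

    #include <stdbool.h>
    #include <stdint.h>

    /* Condensed form of the checks listed above (illustrative only). */
    static bool efer_state_invalid(uint64_t efer, uint64_t cr0, uint64_t cr4,
                                   bool cpu_has_lm, bool cs_long, bool cs_db,
                                   uint64_t efer_reserved_bits)
    {
        bool svme = efer & (1ULL << 12);     /* EFER.SVME */
        bool lme  = efer & (1ULL << 8);      /* EFER.LME  */
        bool lma  = efer & (1ULL << 10);     /* EFER.LMA  */
        bool pg   = cr0  & (1ULL << 31);     /* CR0.PG    */
        bool pe   = cr0  & (1ULL << 0);      /* CR0.PE    */
        bool pae  = cr4  & (1ULL << 5);      /* CR4.PAE   */

        return !svme
            || (efer & efer_reserved_bits)
            || ((lma || lme) && !cpu_has_lm)
            || (lme && pg && !pae)
            || (lme && pg && !pe)
            || (lme && pg && pae && cs_long && cs_db);
    }
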
122 static inline bool virtual_gif_enabled(CPUX86State *env) in virtual_gif_enabled() argument
124 if (likely(env->hflags & HF_GUEST_MASK)) { in virtual_gif_enabled()
125 return (env->features[FEAT_SVM] & CPUID_SVM_VGIF) in virtual_gif_enabled()
126 && (env->int_ctl & V_GIF_ENABLED_MASK); in virtual_gif_enabled()
131 static inline bool virtual_vm_load_save_enabled(CPUX86State *env, uint32_t exit_code, uintptr_t ret… in virtual_vm_load_save_enabled() argument
135 if (likely(env->hflags & HF_GUEST_MASK)) { in virtual_vm_load_save_enabled()
136 if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) { in virtual_vm_load_save_enabled()
137 cpu_vmexit(env, exit_code, 0, retaddr); in virtual_vm_load_save_enabled()
140 lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb, in virtual_vm_load_save_enabled()
142 return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD) in virtual_vm_load_save_enabled()
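
The flow above: outside guest mode the question never arises; inside a guest, a VMLOAD/VMSAVE intercept exits immediately unless nested paging is active and the guest is in long mode, and only then is the virtualized form honoured when the CPUID feature bit and the per-VMCB enable bit agree. A rough decision sketch; that the enable bit checked against the word loaded above is bit 1 is an assumption taken from the APM's VMCB layout, and all names here are placeholders:

    #include <stdbool.h>
    #include <stdint.h>

    /* Placeholder sketch of the virtualized VMLOAD/VMSAVE decision. */
    static bool use_virtual_vmload_vmsave(bool guest_mode, bool npt_active,
                                          bool long_mode,
                                          bool cpuid_v_vmsave_vmload,
                                          uint64_t virt_ext)
    {
        if (!guest_mode) {
            return false;                 /* plain VMLOAD/VMSAVE */
        }
        if (!npt_active || !long_mode) {
            return false;                 /* the intercept exits instead */
        }
        return cpuid_v_vmsave_vmload && (virt_ext & (1ULL << 1));
    }
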
150 static inline bool virtual_gif_set(CPUX86State *env) in virtual_gif_set() argument
152 return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK); in virtual_gif_set()
155 void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) in helper_vmrun() argument
157 CPUState *cs = env_cpu(env); in helper_vmrun()
158 X86CPU *cpu = env_archcpu(env); in helper_vmrun()
170 addr = env->regs[R_EAX]; in helper_vmrun()
172 addr = (uint32_t)env->regs[R_EAX]; in helper_vmrun()
176 if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) { in helper_vmrun()
177 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); in helper_vmrun()
180 cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC()); in helper_vmrun()
184 env->vm_vmcb = addr; in helper_vmrun()
187 x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), in helper_vmrun()
188 env->gdt.base); in helper_vmrun()
189 x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), in helper_vmrun()
190 env->gdt.limit); in helper_vmrun()
192 x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base), in helper_vmrun()
193 env->idt.base); in helper_vmrun()
194 x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), in helper_vmrun()
195 env->idt.limit); in helper_vmrun()
198 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]); in helper_vmrun()
200 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]); in helper_vmrun()
202 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]); in helper_vmrun()
204 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]); in helper_vmrun()
206 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]); in helper_vmrun()
208 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]); in helper_vmrun()
211 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer); in helper_vmrun()
213 env->vm_hsave + offsetof(struct vmcb, save.rflags), in helper_vmrun()
214 cpu_compute_eflags(env)); in helper_vmrun()
216 svm_save_seg(env, MMU_PHYS_IDX, in helper_vmrun()
217 env->vm_hsave + offsetof(struct vmcb, save.es), in helper_vmrun()
218 &env->segs[R_ES]); in helper_vmrun()
219 svm_save_seg(env, MMU_PHYS_IDX, in helper_vmrun()
220 env->vm_hsave + offsetof(struct vmcb, save.cs), in helper_vmrun()
221 &env->segs[R_CS]); in helper_vmrun()
222 svm_save_seg(env, MMU_PHYS_IDX, in helper_vmrun()
223 env->vm_hsave + offsetof(struct vmcb, save.ss), in helper_vmrun()
224 &env->segs[R_SS]); in helper_vmrun()
225 svm_save_seg(env, MMU_PHYS_IDX, in helper_vmrun()
226 env->vm_hsave + offsetof(struct vmcb, save.ds), in helper_vmrun()
227 &env->segs[R_DS]); in helper_vmrun()
229 x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip), in helper_vmrun()
230 env->eip + next_eip_addend); in helper_vmrun()
232 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); in helper_vmrun()
234 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); in helper_vmrun()
238 env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in helper_vmrun()
240 env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb + in helper_vmrun()
243 env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb + in helper_vmrun()
246 env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb + in helper_vmrun()
249 env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb + in helper_vmrun()
252 env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb + in helper_vmrun()
257 env->hflags &= ~HF_INHIBIT_IRQ_MASK; in helper_vmrun()
258 if (x86_ldl_phys(cs, env->vm_vmcb + in helper_vmrun()
261 env->hflags |= HF_INHIBIT_IRQ_MASK; in helper_vmrun()
264 nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in helper_vmrun()
266 asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in helper_vmrun()
269 uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb + in helper_vmrun()
272 uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb + in helper_vmrun()
276 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
280 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
283 env->nested_pg_mode = 0; in helper_vmrun()
285 if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) { in helper_vmrun()
286 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
289 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
293 env->nested_cr3 = x86_ldq_phys(cs, in helper_vmrun()
294 env->vm_vmcb + offsetof(struct vmcb, in helper_vmrun()
296 env->hflags2 |= HF2_NPT_MASK; in helper_vmrun()
298 env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK; in helper_vmrun()
304 env->hflags |= HF_GUEST_MASK; in helper_vmrun()
306 env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb + in helper_vmrun()
309 new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0)); in helper_vmrun()
311 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
314 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
316 new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3)); in helper_vmrun()
317 if ((env->efer & MSR_EFER_LMA) && in helper_vmrun()
319 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
321 new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4)); in helper_vmrun()
322 if (new_cr4 & cr4_reserved_bits(env)) { in helper_vmrun()
323 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
327 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0); in helper_vmrun()
329 cpu_x86_update_cr0(env, new_cr0); in helper_vmrun()
330 cpu_x86_update_cr4(env, new_cr4); in helper_vmrun()
331 cpu_x86_update_cr3(env, new_cr3); in helper_vmrun()
332 env->cr[2] = x86_ldq_phys(cs, in helper_vmrun()
333 env->vm_vmcb + offsetof(struct vmcb, save.cr2)); in helper_vmrun()
334 env->int_ctl = x86_ldl_phys(cs, in helper_vmrun()
335 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); in helper_vmrun()
336 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); in helper_vmrun()
337 if (env->int_ctl & V_INTR_MASKING_MASK) { in helper_vmrun()
338 env->hflags2 |= HF2_VINTR_MASK; in helper_vmrun()
339 if (env->eflags & IF_MASK) { in helper_vmrun()
340 env->hflags2 |= HF2_HIF_MASK; in helper_vmrun()
344 cpu_load_efer(env, in helper_vmrun()
346 env->vm_vmcb + offsetof(struct vmcb, save.efer))); in helper_vmrun()
347 env->eflags = 0; in helper_vmrun()
348 cpu_load_eflags(env, x86_ldq_phys(cs, in helper_vmrun()
349 env->vm_vmcb + offsetof(struct vmcb, in helper_vmrun()
353 svm_load_seg_cache(env, MMU_PHYS_IDX, in helper_vmrun()
354 env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES); in helper_vmrun()
355 svm_load_seg_cache(env, MMU_PHYS_IDX, in helper_vmrun()
356 env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS); in helper_vmrun()
357 svm_load_seg_cache(env, MMU_PHYS_IDX, in helper_vmrun()
358 env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS); in helper_vmrun()
359 svm_load_seg_cache(env, MMU_PHYS_IDX, in helper_vmrun()
360 env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS); in helper_vmrun()
361 svm_load_seg(env, MMU_PHYS_IDX, in helper_vmrun()
362 env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt); in helper_vmrun()
363 svm_load_seg(env, MMU_PHYS_IDX, in helper_vmrun()
364 env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt); in helper_vmrun()
366 env->eip = x86_ldq_phys(cs, in helper_vmrun()
367 env->vm_vmcb + offsetof(struct vmcb, save.rip)); in helper_vmrun()
369 env->regs[R_ESP] = x86_ldq_phys(cs, in helper_vmrun()
370 env->vm_vmcb + offsetof(struct vmcb, save.rsp)); in helper_vmrun()
371 env->regs[R_EAX] = x86_ldq_phys(cs, in helper_vmrun()
372 env->vm_vmcb + offsetof(struct vmcb, save.rax)); in helper_vmrun()
374 new_dr7 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr7)); in helper_vmrun()
375 new_dr6 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr6)); in helper_vmrun()
379 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
382 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
386 cpu_x86_update_dr7(env, new_dr7); in helper_vmrun()
387 env->dr[6] = new_dr6; in helper_vmrun()
389 if (is_efer_invalid_state(env)) { in helper_vmrun()
390 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
394 env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) { in helper_vmrun()
403 env->hflags2 |= HF2_GIF_MASK; in helper_vmrun()
405 if (ctl_has_irq(env)) { in helper_vmrun()
409 if (virtual_gif_set(env)) { in helper_vmrun()
410 env->hflags2 |= HF2_VGIF_MASK; in helper_vmrun()
414 event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in helper_vmrun()
419 uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb + in helper_vmrun()
428 env->error_code = event_inj_err; in helper_vmrun()
429 env->exception_is_int = 0; in helper_vmrun()
430 env->exception_next_eip = -1; in helper_vmrun()
433 do_interrupt_x86_hardirq(env, vector, 1); in helper_vmrun()
437 env->error_code = event_inj_err; in helper_vmrun()
438 env->exception_is_int = 0; in helper_vmrun()
439 env->exception_next_eip = env->eip; in helper_vmrun()
445 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
448 env->error_code = event_inj_err; in helper_vmrun()
449 env->exception_is_int = 0; in helper_vmrun()
450 env->exception_next_eip = -1; in helper_vmrun()
456 env->error_code = event_inj_err; in helper_vmrun()
457 env->exception_is_int = 1; in helper_vmrun()
458 env->exception_next_eip = env->eip; in helper_vmrun()
463 cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC()); in helper_vmrun()
467 env->error_code); in helper_vmrun()
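
The end of helper_vmrun() above injects any pending EVENTINJ event before resuming the guest. Taken as a whole, the sequence this listing traces is (outline only, following the visible loads and stores):

    /* VMRUN emulation, as traced by the matches above:
     * 1. validate rAX alignment and check the VMRUN intercept;
     * 2. save host GDTR/IDTR, CR0-CR4, DR6/DR7, EFER, RFLAGS, RIP, RSP,
     *    RAX and the ES/CS/SS/DS segments into the host save area
     *    (env->vm_hsave);
     * 3. load the control fields (intercept bitmaps, interrupt shadow,
     *    nested-paging control, ASID, MSRPM/IOPM bases) from env->vm_vmcb,
     *    exiting with SVM_EXIT_ERR when a check fails;
     * 4. load guest CR0/CR3/CR4, CR2, int_ctl, EFER, RFLAGS, segments,
     *    descriptor tables, RIP/RSP/RAX and DR6/DR7, again validating
     *    reserved bits and the EFER consistency rules;
     * 5. set GIF (and the virtual GIF flag if enabled) and, when EVENTINJ
     *    is valid, inject the pending exception, interrupt, NMI or soft
     *    interrupt so it is delivered on the first guest instruction. */
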
471 void helper_vmmcall(CPUX86State *env) in helper_vmmcall() argument
473 cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC()); in helper_vmmcall()
474 raise_exception(env, EXCP06_ILLOP); in helper_vmmcall()
477 void helper_vmload(CPUX86State *env, int aflag) in helper_vmload() argument
483 addr = env->regs[R_EAX]; in helper_vmload()
485 addr = (uint32_t)env->regs[R_EAX]; in helper_vmload()
489 if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) { in helper_vmload()
490 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); in helper_vmload()
493 cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC()); in helper_vmload()
495 if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) { in helper_vmload()
499 svm_load_seg_cache(env, mmu_idx, in helper_vmload()
501 svm_load_seg_cache(env, mmu_idx, in helper_vmload()
503 svm_load_seg(env, mmu_idx, in helper_vmload()
504 addr + offsetof(struct vmcb, save.tr), &env->tr); in helper_vmload()
505 svm_load_seg(env, mmu_idx, in helper_vmload()
506 addr + offsetof(struct vmcb, save.ldtr), &env->ldt); in helper_vmload()
509 env->kernelgsbase = in helper_vmload()
510 cpu_ldq_mmuidx_ra(env, in helper_vmload()
513 env->lstar = in helper_vmload()
514 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar), in helper_vmload()
516 env->cstar = in helper_vmload()
517 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar), in helper_vmload()
519 env->fmask = in helper_vmload()
520 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask), in helper_vmload()
522 svm_canonicalization(env, &env->kernelgsbase); in helper_vmload()
524 env->star = in helper_vmload()
525 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star), in helper_vmload()
527 env->sysenter_cs = in helper_vmload()
528 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs), in helper_vmload()
530 env->sysenter_esp = in helper_vmload()
531 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp), in helper_vmload()
533 env->sysenter_eip = in helper_vmload()
534 cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip), in helper_vmload()
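
The search shows only the lines that mention env, so the mmu_idx selection in helper_vmload() is hidden. A hedged reconstruction of what those elided lines decide (MMU_PHYS_IDX appears elsewhere in this listing; the nested-translation index name is an assumption):

    int mmu_idx = MMU_PHYS_IDX;        /* VMCB read via guest-physical addresses */
    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;      /* assumed name: walk nested page tables */
    }
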
538 void helper_vmsave(CPUX86State *env, int aflag) in helper_vmsave() argument
544 addr = env->regs[R_EAX]; in helper_vmsave()
546 addr = (uint32_t)env->regs[R_EAX]; in helper_vmsave()
550 if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) { in helper_vmsave()
551 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); in helper_vmsave()
554 cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC()); in helper_vmsave()
556 if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) { in helper_vmsave()
560 svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.fs), in helper_vmsave()
561 &env->segs[R_FS]); in helper_vmsave()
562 svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.gs), in helper_vmsave()
563 &env->segs[R_GS]); in helper_vmsave()
564 svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr), in helper_vmsave()
565 &env->tr); in helper_vmsave()
566 svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.ldtr), in helper_vmsave()
567 &env->ldt); in helper_vmsave()
570 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.kernel_gs_base), in helper_vmsave()
571 env->kernelgsbase, mmu_idx, 0); in helper_vmsave()
572 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar), in helper_vmsave()
573 env->lstar, mmu_idx, 0); in helper_vmsave()
574 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar), in helper_vmsave()
575 env->cstar, mmu_idx, 0); in helper_vmsave()
576 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask), in helper_vmsave()
577 env->fmask, mmu_idx, 0); in helper_vmsave()
579 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star), in helper_vmsave()
580 env->star, mmu_idx, 0); in helper_vmsave()
581 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs), in helper_vmsave()
582 env->sysenter_cs, mmu_idx, 0); in helper_vmsave()
583 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp), in helper_vmsave()
584 env->sysenter_esp, mmu_idx, 0); in helper_vmsave()
585 cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip), in helper_vmsave()
586 env->sysenter_eip, mmu_idx, 0); in helper_vmsave()
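
helper_vmsave() is the exact mirror of helper_vmload(); a short reminder of the register subset both touch:

    /* VMSAVE writes precisely the state VMLOAD reads, in the same VMCB slots:
     *   segments: FS, GS, TR, LDTR (selector, base, limit, attributes)
     *   MSRs:     KernelGSBase, STAR, LSTAR, CSTAR, SFMASK,
     *             SYSENTER_CS, SYSENTER_ESP, SYSENTER_EIP
     * so a VMSAVE immediately followed by VMLOAD on the same address leaves
     * this register subset unchanged. */
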
589 void helper_stgi(CPUX86State *env) in helper_stgi() argument
591 cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC()); in helper_stgi()
593 if (virtual_gif_enabled(env)) { in helper_stgi()
594 env->int_ctl |= V_GIF_MASK; in helper_stgi()
595 env->hflags2 |= HF2_VGIF_MASK; in helper_stgi()
597 env->hflags2 |= HF2_GIF_MASK; in helper_stgi()
601 void helper_clgi(CPUX86State *env) in helper_clgi() argument
603 cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC()); in helper_clgi()
605 if (virtual_gif_enabled(env)) { in helper_clgi()
606 env->int_ctl &= ~V_GIF_MASK; in helper_clgi()
607 env->hflags2 &= ~HF2_VGIF_MASK; in helper_clgi()
609 env->hflags2 &= ~HF2_GIF_MASK; in helper_clgi()
613 bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type) in cpu_svm_has_intercept() argument
617 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) { in cpu_svm_has_intercept()
622 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) { in cpu_svm_has_intercept()
627 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) { in cpu_svm_has_intercept()
632 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) { in cpu_svm_has_intercept()
637 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) { in cpu_svm_has_intercept()
642 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) { in cpu_svm_has_intercept()
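
cpu_svm_has_intercept() treats each exit-code range as an index into the matching intercept bitmap loaded during VMRUN. A small standalone sketch of that mapping (the SVM_EXIT_* bases are assumed to be contiguous, as the subtractions above imply):

    #include <stdbool.h>
    #include <stdint.h>

    /* Test whether exit code 'type' is intercepted, given the bitmap that
     * covers it and the first exit code of that bitmap's range. */
    static bool intercept_bit_set(uint32_t type, uint32_t range_base,
                                  uint64_t bitmap)
    {
        return bitmap & (1ULL << (type - range_base));
    }

    /* Example: a guest write to CR3 consults bit
     * (SVM_EXIT_WRITE_CR3 - SVM_EXIT_WRITE_CR0) of intercept_cr_write,
     * while generic exits use the 64-bit 'intercept' word based at
     * SVM_EXIT_INTR. */
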
650 void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, in cpu_svm_check_intercept_param() argument
653 CPUState *cs = env_cpu(env); in cpu_svm_check_intercept_param()
655 if (likely(!(env->hflags & HF_GUEST_MASK))) { in cpu_svm_check_intercept_param()
659 if (!cpu_svm_has_intercept(env, type)) { in cpu_svm_check_intercept_param()
665 uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb + in cpu_svm_check_intercept_param()
670 switch ((uint32_t)env->regs[R_ECX]) { in cpu_svm_check_intercept_param()
672 t0 = (env->regs[R_ECX] * 2) % 8; in cpu_svm_check_intercept_param()
673 t1 = (env->regs[R_ECX] * 2) / 8; in cpu_svm_check_intercept_param()
676 t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2; in cpu_svm_check_intercept_param()
681 t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2; in cpu_svm_check_intercept_param()
686 cpu_vmexit(env, type, param, retaddr); in cpu_svm_check_intercept_param()
692 cpu_vmexit(env, type, param, retaddr); in cpu_svm_check_intercept_param()
697 cpu_vmexit(env, type, param, retaddr); in cpu_svm_check_intercept_param()
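
The arithmetic visible above computes a bit position inside the MSR permission map: each MSR owns a read bit followed by a write bit, MSRs 0x0-0x1fff occupy the first 2 KiB of the map, 0xc0000000-0xc0001fff the second, and 0xc0010000-0xc0011fff the third. A standalone sketch of that addressing, assuming the layout follows the APM's MSRPM description:

    #include <stdbool.h>
    #include <stdint.h>

    /* Compute the byte offset and bit number of the read (write = false) or
     * write (write = true) permission bit for an MSR in the MSRPM. */
    static bool msrpm_locate(uint32_t msr, bool write,
                             uint32_t *byte_off, uint32_t *bit_no)
    {
        uint32_t bitpos;

        if (msr <= 0x1fff) {
            bitpos = msr * 2;
        } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
            bitpos = (8192 + (msr - 0xc0000000)) * 2;      /* byte 2048.. */
        } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
            bitpos = (16384 + (msr - 0xc0010000)) * 2;     /* byte 4096.. */
        } else {
            return false;              /* MSR outside the mapped ranges */
        }
        bitpos += write ? 1 : 0;
        *byte_off = bitpos / 8;
        *bit_no   = bitpos % 8;
        return true;
    }
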
700 void helper_svm_check_intercept(CPUX86State *env, uint32_t type) in helper_svm_check_intercept() argument
702 cpu_svm_check_intercept_param(env, type, 0, GETPC()); in helper_svm_check_intercept()
705 void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param, in helper_svm_check_io() argument
708 CPUState *cs = env_cpu(env); in helper_svm_check_io()
710 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) { in helper_svm_check_io()
712 uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb + in helper_svm_check_io()
717 /* next env->eip */ in helper_svm_check_io()
719 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), in helper_svm_check_io()
720 env->eip + next_eip_addend); in helper_svm_check_io()
721 cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC()); in helper_svm_check_io()
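
For I/O intercepts the lookup is simpler: the IOPM holds one bit per port, so an access of n bytes must check n consecutive bits starting at the port's own bit. An illustrative sketch, with iopm_read16 standing in for a read of two bytes of the permission map at the given offset:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when any port touched by an n-byte access at 'port' has its
     * permission bit set, i.e. the access must cause an SVM_EXIT_IOIO. */
    static bool io_access_intercepted(uint32_t port, unsigned n_bytes,
                                      uint16_t (*iopm_read16)(uint64_t off))
    {
        uint16_t mask = (uint16_t)((1u << n_bytes) - 1);
        return iopm_read16(port / 8) & (mask << (port % 8));
    }
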
726 void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1, in cpu_vmexit() argument
729 CPUState *cs = env_cpu(env); in cpu_vmexit()
736 x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in cpu_vmexit()
738 env->eip); in cpu_vmexit()
741 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code), in cpu_vmexit()
744 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in cpu_vmexit()
748 env->old_exception = -1; in cpu_vmexit()
752 void do_vmexit(CPUX86State *env) in do_vmexit() argument
754 CPUState *cs = env_cpu(env); in do_vmexit()
756 if (env->hflags & HF_INHIBIT_IRQ_MASK) { in do_vmexit()
758 env->vm_vmcb + offsetof(struct vmcb, control.int_state), in do_vmexit()
760 env->hflags &= ~HF_INHIBIT_IRQ_MASK; in do_vmexit()
763 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0); in do_vmexit()
765 env->hflags2 &= ~HF2_NPT_MASK; in do_vmexit()
769 svm_save_seg(env, MMU_PHYS_IDX, in do_vmexit()
770 env->vm_vmcb + offsetof(struct vmcb, save.es), in do_vmexit()
771 &env->segs[R_ES]); in do_vmexit()
772 svm_save_seg(env, MMU_PHYS_IDX, in do_vmexit()
773 env->vm_vmcb + offsetof(struct vmcb, save.cs), in do_vmexit()
774 &env->segs[R_CS]); in do_vmexit()
775 svm_save_seg(env, MMU_PHYS_IDX, in do_vmexit()
776 env->vm_vmcb + offsetof(struct vmcb, save.ss), in do_vmexit()
777 &env->segs[R_SS]); in do_vmexit()
778 svm_save_seg(env, MMU_PHYS_IDX, in do_vmexit()
779 env->vm_vmcb + offsetof(struct vmcb, save.ds), in do_vmexit()
780 &env->segs[R_DS]); in do_vmexit()
782 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), in do_vmexit()
783 env->gdt.base); in do_vmexit()
784 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), in do_vmexit()
785 env->gdt.limit); in do_vmexit()
787 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), in do_vmexit()
788 env->idt.base); in do_vmexit()
789 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), in do_vmexit()
790 env->idt.limit); in do_vmexit()
793 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer); in do_vmexit()
795 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]); in do_vmexit()
797 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]); in do_vmexit()
799 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]); in do_vmexit()
801 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]); in do_vmexit()
803 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl); in do_vmexit()
805 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags), in do_vmexit()
806 cpu_compute_eflags(env)); in do_vmexit()
807 x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip), in do_vmexit()
808 env->eip); in do_vmexit()
810 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); in do_vmexit()
812 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); in do_vmexit()
814 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]); in do_vmexit()
816 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]); in do_vmexit()
817 x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl), in do_vmexit()
818 env->hflags & HF_CPL_MASK); in do_vmexit()
821 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); in do_vmexit()
822 env->hflags &= ~HF_GUEST_MASK; in do_vmexit()
823 env->intercept = 0; in do_vmexit()
824 env->intercept_exceptions = 0; in do_vmexit()
828 env->int_ctl = 0; in do_vmexit()
831 env->tsc_offset = 0; in do_vmexit()
833 env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
835 env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
838 env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
840 env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
843 cpu_x86_update_cr0(env, x86_ldq_phys(cs, in do_vmexit()
844 env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
847 cpu_x86_update_cr4(env, x86_ldq_phys(cs, in do_vmexit()
848 env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
859 cpu_x86_update_cr3(env, x86_ldq_phys(cs, in do_vmexit()
860 env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
864 cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
868 env->eflags = 0; in do_vmexit()
869 cpu_load_eflags(env, x86_ldq_phys(cs, in do_vmexit()
870 env->vm_hsave + offsetof(struct vmcb, in do_vmexit()
875 svm_load_seg_cache(env, MMU_PHYS_IDX, in do_vmexit()
876 env->vm_hsave + offsetof(struct vmcb, save.es), R_ES); in do_vmexit()
877 svm_load_seg_cache(env, MMU_PHYS_IDX, in do_vmexit()
878 env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS); in do_vmexit()
879 svm_load_seg_cache(env, MMU_PHYS_IDX, in do_vmexit()
880 env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS); in do_vmexit()
881 svm_load_seg_cache(env, MMU_PHYS_IDX, in do_vmexit()
882 env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS); in do_vmexit()
884 env->eip = x86_ldq_phys(cs, in do_vmexit()
885 env->vm_hsave + offsetof(struct vmcb, save.rip)); in do_vmexit()
886 env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave + in do_vmexit()
888 env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave + in do_vmexit()
891 env->dr[6] = x86_ldq_phys(cs, in do_vmexit()
892 env->vm_hsave + offsetof(struct vmcb, save.dr6)); in do_vmexit()
895 cpu_x86_update_dr7(env, in do_vmexit()
897 env->vm_hsave + offsetof(struct vmcb, save.dr7)) & ~0xff); in do_vmexit()
901 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info), in do_vmexit()
902 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in do_vmexit()
905 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err), in do_vmexit()
906 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, in do_vmexit()
909 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0); in do_vmexit()
911 env->hflags2 &= ~HF2_GIF_MASK; in do_vmexit()
912 env->hflags2 &= ~HF2_VGIF_MASK; in do_vmexit()
922 if ((env->eflags & TF_MASK) != 0) { in do_vmexit()
923 env->dr[6] |= DR6_BS; in do_vmexit()
924 do_interrupt_all(X86_CPU(cs), EXCP01_DB, 0, 0, env->eip, 0); in do_vmexit()
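
do_vmexit() ends by re-checking the host's trap flag, so a single-step debugger sees the instruction boundary at the VMRUN. The overall #VMEXIT sequence traced above, as an outline; the two elided load operands feeding exit_int_info are assumed to be the EVENTINJ fields:

    /* #VMEXIT emulation, as traced by the matches above:
     * 1. latch the guest interrupt shadow into control.int_state and clear
     *    the local inhibit flag;
     * 2. write the guest segments, GDTR/IDTR, EFER, CR0-CR4, int_ctl,
     *    RFLAGS, RIP/RSP/RAX, DR6/DR7 and CPL back into the VMCB;
     * 3. leave guest mode: drop the intercept bitmaps, V_INTR state and
     *    TSC offset;
     * 4. reload host state from env->vm_hsave (descriptor tables, control
     *    registers, EFER, RFLAGS, segments, RIP/RSP/RAX, DR6/DR7);
     * 5. copy the still-pending EVENTINJ (if any) into EXITINTINFO, clear
     *    EVENTINJ and GIF, and raise a #DB with DR6.BS when the host had
     *    TF set. */
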