/*
 *  x86 SVM helpers (sysemu only)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */

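/*
 * The VMCB "attrib" field packs a segment's descriptor attributes into 12
 * bits: bits 0-7 hold descriptor bits 40-47 (type, S, DPL, P) and bits 8-11
 * hold descriptor bits 52-55 (AVL, L, D/B, G).  QEMU's SegmentCache keeps
 * the same attributes at bit positions 8-15 and 20-23 of 'flags', so the
 * two helpers below shift between the layouts when copying segment state
 * to and from the VMCB in guest-physical memory.
 */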
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

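/*
 * VMRUN: save the host state into the hsave area (set up via the
 * VM_HSAVE_PA MSR and cached in env->vm_hsave), load the guest state from
 * the VMCB addressed by rAX, cache the intercept bitmaps in env so they
 * can be checked without touching guest memory, set GIF, and finally
 * inject a pending event if the VMCB's EVENTINJ field requests one.
 */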
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

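    /*
     * Nested paging: when NPT is enabled in the VMCB, guest-virtual
     * addresses are first translated by the guest page tables and the
     * result is translated again through the nested page table rooted at
     * nested_cr3.  nested_pg_mode caches the paging-mode bits the MMU
     * needs for that second-level walk.
     */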
    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));

    env->nested_pg_mode = 0;

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
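    /*
     * With V_INTR_MASKING the guest's EFLAGS.IF only masks virtual
     * interrupts and the TPR is virtualised through V_TPR.  HF2_VINTR
     * records that mode and HF2_HIF remembers whether the host had
     * interrupts enabled at VMRUN (env->eflags still holds the host
     * flags here; the guest rflags are only loaded below).
     */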
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = env_cpu(env);

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
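    /*
     * The EVENTINJ field carries the vector, the event type (external
     * interrupt, NMI, exception or software interrupt), a valid bit and an
     * optional error code (held separately in event_inj_err); the event is
     * delivered before the first guest instruction executes.
     */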
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

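/*
 * VMLOAD/VMSAVE transfer the processor state that VMRUN/#VMEXIT do not
 * handle: FS, GS, TR and LDTR including their hidden state, KernelGSBase,
 * STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs.  The state is moved
 * between the CPU and the VMCB addressed by rAX.
 */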
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                          save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

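/*
 * STGI and CLGI set and clear the global interrupt flag (GIF).  While GIF
 * is clear the processor holds off all interrupts, including NMI and SMI;
 * QEMU tracks the flag in HF2_GIF_MASK.
 */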
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

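/*
 * Central intercept check: when running as an SVM guest, map the exit code
 * onto the intercept bitmaps cached at VMRUN time and raise a #VMEXIT via
 * cpu_vmexit() if the corresponding intercept bit is set.  MSR accesses
 * additionally consult the MSR permission map in guest memory.
 */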
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

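            /*
             * The MSR permission map holds two bits per MSR (read
             * intercept, then write intercept).  MSRs 0..0x1fff start at
             * byte offset 0, 0xc0000000..0xc0001fff at 0x800 and
             * 0xc0010000..0xc0011fff at 0x1000.  t1 is the byte offset of
             * the pair, t0 the bit offset within that byte, and param
             * (0 for rdmsr, 1 for wrmsr) selects which bit to test.
             */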
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

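/*
 * I/O intercept check: the I/O permission map holds one bit per port.  The
 * param value carries the access size in bytes in bits 4-6, so 'mask'
 * covers every port touched by a multi-byte access; if any of those bits
 * is set in the IOPM, exit with SVM_EXIT_IOIO and record the return rIP in
 * exit_info_2 as real hardware does.
 */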
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

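/*
 * Begin a #VMEXIT: store the exit code and exit_info_1 into the current
 * VMCB and unwind to the main loop with EXCP_VMEXIT.  The actual world
 * switch back to the host state is performed later by do_vmexit().
 */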
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

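/*
 * Complete the #VMEXIT: write the guest state and the interrupt shadow back
 * to the VMCB, forward a pending event injection to EXITINTINFO, then
 * reload the host state that VMRUN stashed in the hsave area and clear GIF.
 */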
void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}