/*
 *  x86 SVM helpers (sysemu only)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */

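/*
 * Copy a segment register out of QEMU's SegmentCache into a VMCB segment
 * descriptor.  The VMCB "attrib" field packs the descriptor attribute bits
 * (type/S/DPL/P and AVL/L/D/G) into 12 bits, while SegmentCache keeps them
 * in the layout of the descriptor's high word, hence the shifts below.
 */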
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

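/*
 * Load a segment from the VMCB and commit it to the CPU via
 * cpu_x86_load_seg_cache(), so that the hflags derived from the segment
 * (CPL, code/stack size) are recomputed as on a real segment load.
 */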
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

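/*
 * VMRUN: save the host state into the hsave page pointed to by VM_HSAVE_PA,
 * load the guest state from the VMCB addressed by rAX, copy the intercept
 * controls into CPUX86State, and finally inject the event described by
 * EVENTINJ, if any.
 */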
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;
    uint32_t asid;
    uint64_t new_cr0;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

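    /*
     * aflag encodes the effective address size: 2 means 64-bit, so the full
     * rAX is used as the VMCB address; otherwise it is truncated to 32 bits.
     */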
    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.asid));

    env->nested_pg_mode = 0;

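    /*
     * Architectural consistency checks: the VMRUN intercept bit must be set
     * in the VMCB, and ASID 0 is reserved for the host; either violation
     * makes VMRUN fail with SVM_EXIT_ERR.
     */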
    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

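    /*
     * With nested paging enabled, remember the nested CR3 and the guest's
     * paging mode; the MMU code uses them for the second-stage
     * (guest-physical to host-physical) translation.
     */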
    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
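    /*
     * V_INTR_MASKING gives the guest virtual copies of TPR and IF: latch the
     * host's IF into HF2_HIF_MASK; while HF2_VINTR_MASK is set, physical
     * interrupt delivery is gated by that saved host IF rather than by the
     * guest's EFLAGS.IF.
     */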
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

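    /*
     * Handle the TLB control field.  QEMU does not track ASIDs, so
     * TLB_CONTROL_FLUSH_ALL_ASID is implemented as a full TLB flush.
     */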
    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = env_cpu(env);

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

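    /*
     * EVENTINJ layout: bits 7:0 vector, 10:8 type, bit 11 "error code
     * valid", bit 31 valid; the error code itself sits in the separate
     * event_inj_err field.
     */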
    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

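/*
 * VMLOAD: load the "extra" guest state that VMRUN does not touch from the
 * VMCB addressed by rAX: FS, GS, TR, LDTR, KernelGSBase, the SYSCALL MSRs
 * and the SYSENTER MSRs.
 */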
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                          save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

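/*
 * VMSAVE: the mirror image of VMLOAD; store the same set of registers from
 * the CPU into the VMCB addressed by rAX.
 */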
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

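/*
 * STGI/CLGI set and clear the Global Interrupt Flag, tracked in hflags2 as
 * HF2_GIF_MASK; while GIF is clear the CPU takes no interrupts or NMIs.
 */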
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

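/*
 * Return whether the given SVM exit code is intercepted, using the
 * intercept bitmaps that were copied out of the VMCB at VMRUN time.
 */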
bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
        uint32_t t0, t1;

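        /*
         * The MSR permission map stores two bits per MSR (read, then write)
         * in three 2KB ranges covering MSRs 0..0x1fff, 0xc0000000..0xc0001fff
         * and 0xc0010000..0xc0011fff.  t1 is the byte offset into the map, t0
         * the bit offset within that byte; param selects the read (0) or
         * write (1) bit.
         */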
        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

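/*
 * I/O intercept check: the I/O permission map has one bit per port.  param
 * is the IOIO exit information; its size bits (one-hot in bits 6:4) are
 * turned into a contiguous mask so that multi-byte accesses test every port
 * they touch.
 */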
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

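/*
 * Trigger a #VMEXIT: record the exit code and exit_info_1 in the VMCB and
 * longjmp back to the main loop with EXCP_VMEXIT; the actual world switch
 * back to the host happens later in do_vmexit().
 */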
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

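/*
 * Complete the #VMEXIT: save the guest state back into the VMCB, reload the
 * host state from the hsave page, clear GIF and drop the intercepts that
 * were activated at VMRUN.
 */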
void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);
    uint32_t int_ctl;

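    /*
     * Export the interrupt shadow (the one-instruction window after MOV SS
     * or STI) into the VMCB's int_state field so the hypervisor can see it.
     */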
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set EFER after the CRs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}
733