xref: /openbmc/qemu/target/i386/tcg/sysemu/svm_helper.c (revision 1580b897)
/*
 *  x86 SVM helpers (sysemu only)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */

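/*
 * The VMCB keeps segment attributes in a packed 12-bit "attrib" field:
 * bits 0..7 hold the access byte (type/S/DPL/P) and bits 8..11 hold
 * AVL/L/DB/G.  QEMU's SegmentCache keeps the same bits in the descriptor
 * flags at positions 8..15 and 20..23, so the two helpers below shuffle
 * sc->flags accordingly when copying segments to and from guest memory.
 */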
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

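/*
 * VMRUN: save the host (hypervisor) state to the host save area pointed
 * to by the VM_HSAVE_PA MSR, then load the guest state and the intercept
 * configuration from the VMCB whose physical address is in rAX.
 */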
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;
    uint32_t asid;
    uint64_t new_cr0;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

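    /*
     * aflag describes the effective address size: 2 means 64-bit, so the
     * VMCB address is the full value of RAX; otherwise it is truncated
     * to 32 bits.
     */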
    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.asid));

    uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
    uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));

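    /*
     * Consistency checks: VMRUN fails with VMEXIT_INVALID (SVM_EXIT_ERR)
     * if the MSR or I/O permission map does not fit below the limit of
     * the implemented physical address space, if the VMRUN intercept is
     * not set, or if the guest ASID is zero.
     */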
    if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    env->nested_pg_mode = 0;

    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

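    /*
     * With nested paging enabled, remember the guest's nested CR3 and the
     * current paging mode so that the TCG MMU code can perform the
     * second-level (guest-physical to host-physical) translation.
     */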
    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
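    /*
     * If the VMCB requests virtual interrupt masking, the guest gets its
     * own virtual TPR and the host's EFLAGS.IF is remembered in HF2_HIF
     * so that physical interrupt delivery can still be decided for the
     * host while the guest runs.
     */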
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

#ifdef TARGET_X86_64
    if (env->dr[6] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (env->dr[7] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
#endif

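    /*
     * tlb_ctl asks for TLB maintenance on this VMRUN; since TCG does not
     * model ASIDs, a request to flush all ASIDs is implemented as a full
     * TLB flush.
     */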
    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = env_cpu(env);

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

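/*
 * VMLOAD/VMSAVE transfer the processor state that VMRUN and #VMEXIT do
 * not switch: FS, GS, TR, LDTR (including the hidden portions), the
 * KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and SYSENTER MSRs.
 */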
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                          save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

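/* VMSAVE is the mirror image of VMLOAD: it stores the same register set
   into the VMCB addressed by rAX. */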
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

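/* STGI and CLGI set and clear the global interrupt flag (GIF).  While GIF
   is clear, interrupts, NMIs and other events are held pending, which lets
   the hypervisor switch world state atomically. */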
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

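/* Map an SVM exit code onto the intercept bitmaps that were cached in
   CPUX86State at VMRUN time and report whether the intercept is active. */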
bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
        uint32_t t0, t1;

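        /*
         * The MSR permission map dedicates two consecutive bits to each
         * MSR (read intercept first, then write intercept).  The three
         * architectural MSR ranges are packed one after another: MSRs
         * 0..0x1fff start at byte 0, 0xc0000000..0xc0001fff at byte 0x800
         * and 0xc0010000..0xc0011fff at byte 0x1000.  t1 is the byte
         * offset into the map and t0 the bit offset within that byte;
         * param selects the read (0) or write (1) bit.  For example,
         * EFER (0xc0000080) lands at byte 0x800 + 0x80 * 2 / 8 = 0x820,
         * bit 0 for reads and bit 1 for writes.
         */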
        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

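    /*
     * The I/O permission map has one bit per port.  param carries the
     * IOIO exit information; its one-hot size bits (SZ8/SZ16/SZ32 at
     * positions 4..6) shift down to the access size in bytes, so the
     * mask below covers every port touched by the access.
     */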
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

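/* Record the exit code and exit_info_1 in the VMCB and leave the guest by
   raising EXCP_VMEXIT; the actual world switch back to the host happens in
   do_vmexit() on the exception path. */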
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

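/* Perform the world switch back to the host: save the guest state into the
   VMCB, disable the cached intercepts, and reload the host state from the
   hsave area that was filled in by VMRUN. */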
void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set EFER after the control registers so that the hidden
       flags get set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}