xref: /openbmc/qemu/target/i386/tcg/sysemu/svm_helper.c (revision ba7d12eb)
/*
 *  x86 SVM helpers (sysemu only)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */

static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         const SegmentCache *sc)
{
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                      sc->selector, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                      sc->base, mmu_idx, 0);
    cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                      sc->limit, mmu_idx, 0);
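    /*
     * Repack the descriptor flags into the VMCB attribute format:
     * attrib bits 7:0 take flags bits 15:8 (type, S, DPL, P) and
     * attrib bits 11:8 take flags bits 23:20 (AVL, L, D/B, G).
     */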
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                      ((sc->flags >> 8) & 0xff)
                      | ((sc->flags >> 12) & 0x0f00),
                      mmu_idx, 0);
}

/*
 * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
 * addresses in the segment registers that have been loaded.
 */
static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
{
    uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
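    /*
     * Shift the base so that its highest implemented bit lands in bit 63,
     * then arithmetic-shift back down to replicate it into the upper bits.
     */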
    *seg_base = ((((int64_t) *seg_base) << shift_amt) >> shift_amt);
}

static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         SegmentCache *sc)
{
    unsigned int flags;

    sc->selector =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                           mmu_idx, 0);
    sc->base =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                          mmu_idx, 0);
    sc->limit =
        cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                          mmu_idx, 0);
    flags =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                           mmu_idx, 0);
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);

    svm_canonicalization(env, &sc->base);
}

static void svm_load_seg_cache(CPUX86State *env, int mmu_idx,
                               hwaddr addr, int seg_reg)
{
    SegmentCache sc;

    svm_load_seg(env, mmu_idx, addr, &sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc.selector,
                           sc.base, sc.limit, sc.flags);
}

static inline bool is_efer_invalid_state(CPUX86State *env)
{
    if (!(env->efer & MSR_EFER_SVME)) {
        return true;
    }

    if (env->efer & MSR_EFER_RESERVED) {
        return true;
    }

    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[4] & CR4_PAE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[0] & CR0_PE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && (env->cr[4] & CR4_PAE_MASK)
                                && (env->segs[R_CS].flags & DESC_L_MASK)
                                && (env->segs[R_CS].flags & DESC_B_MASK)) {
        return true;
    }

    return false;
}

static inline bool virtual_gif_enabled(CPUX86State *env)
{
    if (likely(env->hflags & HF_GUEST_MASK)) {
        return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
                    && (env->int_ctl & V_GIF_ENABLED_MASK);
    }
    return false;
}

static inline bool virtual_vm_load_save_enabled(CPUX86State *env,
                                                uint32_t exit_code,
                                                uintptr_t retaddr)
{
    uint64_t lbr_ctl;

    if (likely(env->hflags & HF_GUEST_MASK)) {
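        /*
         * Virtual VMLOAD/VMSAVE is only honoured with nested paging
         * enabled and the guest in long mode; otherwise the instruction
         * takes the normal intercept exit right away.
         */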
        if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) {
            cpu_vmexit(env, exit_code, 0, retaddr);
        }

        lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
                                                  control.lbr_ctl));
        return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
                && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
    }

    return false;
}

static inline bool virtual_gif_set(CPUX86State *env)
{
    return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t asid;
    uint64_t new_cr0;
    uint64_t new_cr3;
    uint64_t new_cr4;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

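    /*
     * The address-size attribute of VMRUN selects the VMCB pointer:
     * 64-bit forms use the full RAX, 16/32-bit forms use EAX zero-extended.
     */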
    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.asid));

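    /*
     * The MSR and I/O permission maps must fit entirely below the
     * guest-physical address limit; a map that overlaps the limit makes
     * the VMRUN fail with SVM_EXIT_ERR.
     */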
    uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
    uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));

    if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    env->nested_pg_mode = 0;

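    /*
     * The VMRUN intercept bit must be set and the guest ASID must be
     * non-zero (ASID 0 belongs to the host); either violation makes the
     * VMCB invalid.
     */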
    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

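    /*
     * With nested paging, guest-physical addresses are translated through
     * nested_cr3; flush the nested MMU index so no stale translations
     * from a previous guest survive.
     */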
    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;

        tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
    if ((env->efer & MSR_EFER_LMA) &&
            (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
    if (new_cr4 & cr4_reserved_bits(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, new_cr4);
    cpu_x86_update_cr3(env, new_cr3);
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    env->int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
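    /*
     * With V_INTR_MASKING the guest's IF only gates virtual interrupts;
     * remember whether the host had physical interrupts enabled (HF2_HIF)
     * so they can still be taken while the guest runs.
     */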
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (env->int_ctl & V_INTR_MASKING_MASK) {
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

#ifdef TARGET_X86_64
    if (env->dr[6] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (env->dr[7] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
#endif

    if (is_efer_invalid_state(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

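    /*
     * TLB_CONTROL requests ASID-selective flushes; TCG does not keep
     * per-ASID TLBs, so a full flush is the conservative equivalent.
     */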
    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

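    /*
     * If the VMCB requests a virtual interrupt, latch CPU_INTERRUPT_VIRQ
     * so the main loop can deliver it to the guest.
     */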
    if (ctl_has_irq(env)) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    if (virtual_gif_set(env)) {
        env->hflags2 |= HF2_VGIF_MASK;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            if (vector == EXCP02_NMI || vector >= 31) {
                cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            }
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        default:
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

void helper_vmload(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

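    /*
     * With virtual VMLOAD/VMSAVE the VMCB address is a guest-physical
     * address and is translated through the nested page tables.
     */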
    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }

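    /*
     * VMLOAD restores the hidden state that VMRUN leaves untouched:
     * FS, GS, TR, LDTR and the SYSCALL/SYSENTER MSRs.
     */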
    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase =
        cpu_ldq_mmuidx_ra(env,
                          addr + offsetof(struct vmcb, save.kernel_gs_base),
                          mmu_idx, 0);
    env->lstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                          mmu_idx, 0);
    env->cstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                          mmu_idx, 0);
    env->fmask =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                          mmu_idx, 0);
    svm_canonicalization(env, &env->kernelgsbase);
#endif
    env->star =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                          mmu_idx, 0);
    env->sysenter_cs =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                          mmu_idx, 0);
    env->sysenter_esp =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                          mmu_idx, 0);
    env->sysenter_eip =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                          mmu_idx, 0);
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }

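    /*
     * VMSAVE is the mirror of VMLOAD: it writes the same hidden state
     * back into the VMCB save area.
     */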
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.kernel_gs_base),
                      env->kernelgsbase, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                      env->lstar, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                      env->cstar, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                      env->fmask, mmu_idx, 0);
#endif
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                      env->star, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                      env->sysenter_cs, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                      env->sysenter_esp, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                      env->sysenter_eip, mmu_idx, 0);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());

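    /*
     * With VGIF the global interrupt flag lives in V_GIF inside int_ctl;
     * without it, STGI and CLGI toggle the real GIF in hflags2.
     */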
    if (virtual_gif_enabled(env)) {
        env->int_ctl |= V_GIF_MASK;
        env->hflags2 |= HF2_VGIF_MASK;
    } else {
        env->hflags2 |= HF2_GIF_MASK;
    }
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl &= ~V_GIF_MASK;
        env->hflags2 &= ~HF2_VGIF_MASK;
    } else {
        env->hflags2 &= ~HF2_GIF_MASK;
    }
}

bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
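    /*
     * The intercept bitmaps were cached from the VMCB at VMRUN time;
     * each exit-code range is checked against its own bitmap.
     */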
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
        uint32_t t0, t1;

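        /*
         * Locate the permission bits for ECX in the MSR permission map:
         * each MSR has two bits (read, write), and the three architectural
         * MSR ranges start at byte offsets 0, 0x800 and 0x1000.  'param'
         * selects the read (0) or write (1) bit.
         */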
        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

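        /*
         * The I/O permission map holds one bit per port; 'mask' covers
         * every byte of the access so a partially intercepted range
         * still triggers the exit.
         */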
        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

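    /* Re-synchronize env (EIP, flags) from the translated code first. */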
    cpu_restore_state(cs, retaddr);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

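    /*
     * Record the exit reason in the VMCB and unwind to the cpu loop; the
     * actual world switch back to the host is done by do_vmexit() from
     * the EXCP_VMEXIT path.
     */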
    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

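    /*
     * Record the guest's interrupt shadow in the VMCB so the host can
     * re-inject correctly, then drop it from the CPU state.
     */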
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;
    tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);

    /* Save the VM state in the vmcb */
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->int_ctl = 0;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set EFER after the control registers so the hidden flags
       get set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* Report any event that was being injected via EXIT_INT_INFO and
       clear EVENTINJ */
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

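    /*
     * #VMEXIT leaves GIF clear; the host must execute STGI before it can
     * take interrupts again.
     */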
    env->hflags2 &= ~HF2_GIF_MASK;
    env->hflags2 &= ~HF2_VGIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}
887