xref: /openbmc/qemu/target/i386/tcg/sysemu/svm_helper.c (revision 3b34ccad)
/*
 *  x86 SVM helpers (sysemu only)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */

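/*
 * Store a cached segment register into a vmcb_seg at 'addr'.  The
 * descriptor flags QEMU keeps in SegmentCache.flags (bits 8..15 and
 * 20..23 of the descriptor's second dword) are repacked into the
 * 12-bit SVM attribute format (bits 0..7 and 8..11).  'mmu_idx' is
 * either MMU_PHYS_IDX (host-physical VMCB) or MMU_NESTED_IDX (nested
 * guest-physical, for virtualized VMLOAD/VMSAVE).
 */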
static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         const SegmentCache *sc)
{
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                      sc->selector, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                      sc->base, mmu_idx, 0);
    cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                      sc->limit, mmu_idx, 0);
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                      ((sc->flags >> 8) & 0xff)
                      | ((sc->flags >> 12) & 0x0f00),
                      mmu_idx, 0);
}

/*
 * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
 * addresses in the segment registers that have been loaded.
 */
static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
{
    uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
    *seg_base = ((((int64_t) *seg_base) << shift_amt) >> shift_amt);
}

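/*
 * Load a vmcb_seg from 'addr' into a SegmentCache: the inverse of
 * svm_save_seg().  The packed SVM attribute word is expanded back
 * into QEMU's flags layout and the segment base is canonicalized.
 */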
static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         SegmentCache *sc)
{
    unsigned int flags;

    sc->selector =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                           mmu_idx, 0);
    sc->base =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                          mmu_idx, 0);
    sc->limit =
        cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                          mmu_idx, 0);
    flags =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                           mmu_idx, 0);
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);

    svm_canonicalization(env, &sc->base);
}

static void svm_load_seg_cache(CPUX86State *env, int mmu_idx,
                               hwaddr addr, int seg_reg)
{
    SegmentCache sc;

    svm_load_seg(env, mmu_idx, addr, &sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc.selector,
                           sc.base, sc.limit, sc.flags);
}

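/*
 * Consistency checks on EFER and related state that make VMRUN fail
 * with SVM_EXIT_ERR, mirroring the illegal guest-state combinations
 * listed for VMRUN in the AMD APM: SVME clear, reserved bits set,
 * long mode without the LM feature, LME+PG without PAE or PE, and
 * CS.L together with CS.D in long mode.
 */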
static inline bool is_efer_invalid_state(CPUX86State *env)
{
    if (!(env->efer & MSR_EFER_SVME)) {
        return true;
    }

    if (env->efer & MSR_EFER_RESERVED) {
        return true;
    }

    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[4] & CR4_PAE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[0] & CR0_PE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && (env->cr[4] & CR4_PAE_MASK)
                                && (env->segs[R_CS].flags & DESC_L_MASK)
                                && (env->segs[R_CS].flags & DESC_B_MASK)) {
        return true;
    }

    return false;
}

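/*
 * True when running a nested guest whose VMCB enables the vGIF
 * extension (CPUID bit plus V_GIF_ENABLED in the V_INTR control
 * field); STGI/CLGI then operate on the virtual GIF in int_ctl
 * instead of the real GIF.
 */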
static inline bool virtual_gif_enabled(CPUX86State *env)
{
    if (likely(env->hflags & HF_GUEST_MASK)) {
        return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
                    && (env->int_ctl & V_GIF_ENABLED_MASK);
    }
    return false;
}

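/*
 * For a nested guest, VMLOAD/VMSAVE always #VMEXIT with 'exit_code'
 * unless nested paging and long mode are active.  Otherwise report
 * whether virtualized VMLOAD/VMSAVE is enabled (CPUID bit plus the
 * enable bit in control.lbr_ctl); if so, the callers access the VMCB
 * through the nested MMU rather than as a host-physical address.
 */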
static inline bool virtual_vm_load_save_enabled(CPUX86State *env,
                                                uint32_t exit_code,
                                                uintptr_t retaddr)
{
    uint64_t lbr_ctl;

    if (likely(env->hflags & HF_GUEST_MASK)) {
        if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) {
            cpu_vmexit(env, exit_code, 0, retaddr);
        }

        lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
                                                  control.lbr_ctl));
        return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
                && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
    }

    return false;
}

static inline bool virtual_gif_set(CPUX86State *env)
{
    return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
}

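/*
 * VMRUN: save the host state into the host save area (VM_HSAVE_PA),
 * cache the intercept settings from the VMCB so they can be checked
 * without memory accesses, run the consistency checks (VMRUN
 * intercept present, ASID != 0, MSRPM/IOPM addresses, CR0/CR3/CR4,
 * EFER), load the guest state, and finally inject a pending event
 * from EVENTINJ if one is valid.
 */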
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t asid;
    uint64_t new_cr0;
    uint64_t new_cr3;
    uint64_t new_cr4;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.asid));

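    /*
     * The MSR and I/O permission maps must fit entirely below the
     * guest's physical address limit; otherwise VMRUN fails with
     * SVM_EXIT_ERR (one of the VMRUN consistency checks).
     */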
    uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
    uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));

    if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    env->nested_pg_mode = 0;

    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;

        tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

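    /*
     * Validate the guest control registers before loading them: CR0
     * reserved bits clear and CD set whenever NW is set, CR3 within
     * the physical address limit in long mode, CR4 reserved bits
     * clear.  Any violation fails VMRUN with SVM_EXIT_ERR.
     */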
    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
    if ((env->efer & MSR_EFER_LMA) &&
            (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
    if (new_cr4 & cr4_reserved_bits(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, new_cr4);
    cpu_x86_update_cr3(env, new_cr3);
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    env->int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (env->int_ctl & V_INTR_MASKING_MASK) {
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

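    /* Load the guest segment registers, IDTR and GDTR from the VMCB. */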
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

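    /* DR6/DR7 reserved bits must be clear, or VMRUN fails. */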
#ifdef TARGET_X86_64
    if (env->dr[6] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (env->dr[7] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
#endif

    if (is_efer_invalid_state(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (ctl_has_irq(env)) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    if (virtual_gif_set(env)) {
        env->hflags2 |= HF2_VGIF_MASK;
    }

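    /*
     * EVENTINJ layout: bits 7:0 vector, bits 10:8 type, bit 11
     * error-code-valid, bit 31 valid; the error code itself is in
     * EVENTINJ_ERR.
     */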
    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            if (vector == EXCP02_NMI || vector >= 31) {
                cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            }
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        default:
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

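/*
 * VMMCALL: the intercept check #VMEXITs to the hypervisor when the
 * VMMCALL intercept is enabled; a non-intercepted VMMCALL raises #UD.
 */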
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

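/*
 * VMLOAD: copy the "hidden" state (FS, GS, TR, LDTR plus KernelGSBase,
 * STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs) from the VMCB at rAX
 * into the CPU.  With virtualized VMLOAD/VMSAVE the VMCB address is a
 * nested guest-physical address and goes through the nested MMU.
 */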
void helper_vmload(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }

    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase =
        cpu_ldq_mmuidx_ra(env,
                          addr + offsetof(struct vmcb, save.kernel_gs_base),
                          mmu_idx, 0);
    env->lstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                          mmu_idx, 0);
    env->cstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                          mmu_idx, 0);
    env->fmask =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                          mmu_idx, 0);
    svm_canonicalization(env, &env->kernelgsbase);
#endif
    env->star =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                          mmu_idx, 0);
    env->sysenter_cs =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                          mmu_idx, 0);
    env->sysenter_esp =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                          mmu_idx, 0);
    env->sysenter_eip =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                          mmu_idx, 0);
}

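/*
 * VMSAVE: the inverse of VMLOAD, storing the same set of registers
 * from the CPU into the VMCB at rAX.
 */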
void helper_vmsave(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }

    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.kernel_gs_base),
                      env->kernelgsbase, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                      env->lstar, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                      env->cstar, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                      env->fmask, mmu_idx, 0);
#endif
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                      env->star, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                      env->sysenter_cs, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                      env->sysenter_esp, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                      env->sysenter_eip, mmu_idx, 0);
}

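/*
 * STGI/CLGI set and clear the global interrupt flag.  When vGIF is
 * enabled for a nested guest, they toggle the virtual GIF bit in
 * int_ctl instead of the real GIF.
 */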
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl |= V_GIF_MASK;
        env->hflags2 |= HF2_VGIF_MASK;
    } else {
        env->hflags2 |= HF2_GIF_MASK;
    }
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl &= ~V_GIF_MASK;
        env->hflags2 &= ~HF2_VGIF_MASK;
    } else {
        env->hflags2 &= ~HF2_GIF_MASK;
    }
}

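/*
 * Test whether the given exit code is intercepted, using the bitmaps
 * cached from the VMCB at VMRUN time: CR/DR read and write intercepts,
 * exception intercepts, and the generic intercept vector for every
 * other exit code.
 */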
bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}

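/*
 * Trigger a #VMEXIT if we are running as a nested guest and the exit
 * code is intercepted.  For SVM_EXIT_MSR the MSR permission map is
 * consulted as well: two bits per MSR (read bit, then write bit) in
 * three 2K blocks covering MSRs 0-0x1fff, 0xc0000000-0xc0001fff and
 * 0xc0010000-0xc0011fff; 'param' selects the read (0) or write (1) bit.
 */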
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
        uint32_t t0, t1;

        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

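/*
 * IOIO intercept: the I/O permission map has one bit per port; a
 * multi-byte access checks the bits for each byte it touches (the
 * access size is encoded in bits 6:4 of 'param').  On a hit, the
 * next EIP is recorded in EXITINFO2 and SVM_EXIT_IOIO is raised with
 * the port number in bits 31:16 of EXITINFO1.
 */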
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

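/*
 * Record the exit code and EXITINFO1 in the VMCB and unwind to the
 * main loop with EXCP_VMEXIT; the actual world switch back to the
 * host is done by do_vmexit() when that exception is handled.
 */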
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

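/*
 * Complete a #VMEXIT: save the guest state and interrupt shadow into
 * the VMCB, leave guest mode (drop the cached intercepts, V_IRQ and
 * TSC offset), and reload the host state from the host save area.
 */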
void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;
    tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);

    /* Save the VM state in the vmcb */
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->int_ctl = 0;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* Copy any pending event injection into EXITINTINFO, then clear EVENTINJ */
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    env->hflags2 &= ~HF2_VGIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}
889