xref: /openbmc/qemu/target/i386/tcg/sysemu/svm_helper.c (revision 707ded20)
/*
 *  x86 SVM helpers (sysemu only)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */

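/*
 * The VMCB keeps segment attributes in a packed 12-bit "attrib" field,
 * while QEMU keeps them expanded in SegmentCache.flags at bits 8-23.
 * svm_save_seg() and svm_load_seg() below convert between the two layouts:
 *
 *   attrib[7:0]  <-> flags[15:8]   (type, S, DPL, P)
 *   attrib[11:8] <-> flags[23:20]  (AVL, L, D/B, G)
 */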
static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         const SegmentCache *sc)
{
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                      sc->selector, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                      sc->base, mmu_idx, 0);
    cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                      sc->limit, mmu_idx, 0);
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                      ((sc->flags >> 8) & 0xff)
                      | ((sc->flags >> 12) & 0x0f00),
                      mmu_idx, 0);
}

/*
 * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
 * addresses in the segment registers that have been loaded.
 */
static inline void svm_canonicalization(CPUX86State *env,
                                        target_ulong *seg_base)
{
    uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);

    /* A 64-bit arithmetic shift sign-extends from the implemented width. */
    *seg_base = ((((int64_t) *seg_base) << shift_amt) >> shift_amt);
}

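/*
 * Load one segment register image (a struct vmcb_seg) from the save area
 * at @addr, undoing the attrib packing and canonicalizing the base.
 */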
static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         SegmentCache *sc)
{
    unsigned int flags;

    sc->selector =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                           mmu_idx, 0);
    sc->base =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                          mmu_idx, 0);
    sc->limit =
        cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                          mmu_idx, 0);
    flags =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                           mmu_idx, 0);
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);

    svm_canonicalization(env, &sc->base);
}

static void svm_load_seg_cache(CPUX86State *env, int mmu_idx,
                               hwaddr addr, int seg_reg)
{
    SegmentCache sc;

    svm_load_seg(env, mmu_idx, addr, &sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc.selector,
                           sc.base, sc.limit, sc.flags);
}

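/*
 * VMRUN consistency checks on EFER and the control registers; any violation
 * makes VMRUN fail with SVM_EXIT_ERR (cf. the canonicalization and
 * consistency checks listed for VMRUN in the AMD APM vol. 2).
 */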
static inline bool is_efer_invalid_state(CPUX86State *env)
{
    if (!(env->efer & MSR_EFER_SVME)) {
        return true;
    }

    if (env->efer & MSR_EFER_RESERVED) {
        return true;
    }

    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[4] & CR4_PAE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[0] & CR0_PE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && (env->cr[4] & CR4_PAE_MASK)
                                && (env->segs[R_CS].flags & DESC_L_MASK)
                                && (env->segs[R_CS].flags & DESC_B_MASK)) {
        return true;
    }

    return false;
}

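/*
 * Virtual GIF is only honoured while running a nested guest whose VMCB
 * enables it and whose CPU model advertises the VGIF feature bit.
 */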
static inline bool virtual_gif_enabled(CPUX86State *env)
{
    if (likely(env->hflags & HF_GUEST_MASK)) {
        return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
                    && (env->int_ctl & V_GIF_ENABLED_MASK);
    }
    return false;
}

static inline bool virtual_vm_load_save_enabled(CPUX86State *env,
                                                uint32_t exit_code,
                                                uintptr_t retaddr)
{
    uint64_t lbr_ctl;

    if (likely(env->hflags & HF_GUEST_MASK)) {
        if (likely(!(env->hflags2 & HF2_NPT_MASK)) ||
            !(env->efer & MSR_EFER_LMA)) {
            cpu_vmexit(env, exit_code, 0, retaddr);
        }

        lbr_ctl = x86_ldl_phys(env_cpu(env),
                               env->vm_vmcb + offsetof(struct vmcb,
                                                       control.lbr_ctl));
        return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
                && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
    }

    return false;
}

static inline bool virtual_gif_set(CPUX86State *env)
{
    return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
}

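/*
 * VMRUN: save the host state into the host save area pointed to by the
 * VM_HSAVE_PA MSR, cache the intercept bitmaps, load the guest state from
 * the VMCB at rAX, run the consistency checks, and finally inject any
 * event pending in the VMCB EVENTINJ field.
 */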
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t asid;
    uint64_t new_cr0;
    uint64_t new_cr3;
    uint64_t new_cr4;

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* Exceptions are checked before the intercept.  */
    if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.asid));

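    /*
     * The MSR and I/O permission maps must fit entirely below the
     * physical address limit, otherwise VMRUN fails with SVM_EXIT_ERR.
     */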
    uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
    uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));

    if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    env->nested_pg_mode = 0;

    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

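    /*
     * With nested paging the guest's CR3 is translated through nested_cr3;
     * drop any stale nested-MMU translations before entering the guest.
     */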
    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;

        tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
    if ((env->efer & MSR_EFER_LMA) &&
            (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
    if (new_cr4 & cr4_reserved_bits(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, new_cr4);
    cpu_x86_update_cr3(env, new_cr3);
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    env->int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (env->int_ctl & V_INTR_MASKING_MASK) {
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

#ifdef TARGET_X86_64
    if (env->dr[6] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (env->dr[7] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
#endif

    if (is_efer_invalid_state(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (ctl_has_irq(env)) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    if (virtual_gif_set(env)) {
        env->hflags2 |= HF2_VGIF_MASK;
    }

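    /*
     * EVENTINJ layout: bits 7:0 vector, bits 10:8 type, bit 11 "deliver
     * error code", bit 31 valid; the error code itself sits in
     * event_inj_err.
     */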
    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            if (vector == EXCP02_NMI || vector >= 31) {
                cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            }
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        default:
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

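/*
 * VMLOAD: load the FS/GS/TR/LDTR segments and the SYSCALL/SYSENTER MSR
 * state from the VMCB save area at rAX.  When virtualized VMLOAD/VMSAVE is
 * active, the address is treated as a guest-physical one and goes through
 * the nested MMU instead of being used as a host-physical address.
 */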
void helper_vmload(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* Exceptions are checked before the intercept.  */
    if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }

    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase =
        cpu_ldq_mmuidx_ra(env,
                          addr + offsetof(struct vmcb, save.kernel_gs_base),
                          mmu_idx, 0);
    env->lstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                          mmu_idx, 0);
    env->cstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                          mmu_idx, 0);
    env->fmask =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                          mmu_idx, 0);
    svm_canonicalization(env, &env->kernelgsbase);
#endif
    env->star =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                          mmu_idx, 0);
    env->sysenter_cs =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                          mmu_idx, 0);
    env->sysenter_esp =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                          mmu_idx, 0);
    env->sysenter_eip =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                          mmu_idx, 0);
}

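/*
 * VMSAVE: the mirror image of VMLOAD, storing the same hidden segment and
 * MSR state back into the VMCB save area at rAX.
 */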
void helper_vmsave(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* Exceptions are checked before the intercept.  */
    if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }

    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.kernel_gs_base),
                      env->kernelgsbase, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                      env->lstar, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                      env->cstar, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                      env->fmask, mmu_idx, 0);
#endif
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                      env->star, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                      env->sysenter_cs, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                      env->sysenter_esp, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                      env->sysenter_eip, mmu_idx, 0);
}

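/*
 * STGI and CLGI set and clear the global interrupt flag.  With virtual GIF
 * enabled they operate on the V_GIF bit in the int_ctl image instead of
 * the real GIF.
 */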
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl |= V_GIF_MASK;
        env->hflags2 |= HF2_VGIF_MASK;
    } else {
        env->hflags2 |= HF2_GIF_MASK;
    }
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl &= ~V_GIF_MASK;
        env->hflags2 &= ~HF2_VGIF_MASK;
    } else {
        env->hflags2 &= ~HF2_GIF_MASK;
    }
}

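/*
 * Return true if @type is intercepted: CR/DR accesses and exceptions are
 * looked up in their dedicated bitmaps, everything else in the generic
 * intercept vector (indexed relative to SVM_EXIT_INTR).
 */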
bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}

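/*
 * Check an intercept while running as a guest and, if it is active, exit
 * to the outer hypervisor.  For SVM_EXIT_MSR the MSR permission map is
 * consulted: it covers the ranges 0-0x1fff, 0xc0000000-0xc0001fff and
 * 0xc0010000-0xc0011fff with two bits per MSR, and @param selects the
 * read (0) or write (1) bit.
 */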
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
        uint32_t t0, t1;

        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

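/*
 * IOIO intercepts use one bit per port in the I/O permission map.  @param
 * carries the IOIO exit-information word; its size field (bits 6:4, in
 * bytes) decides how many consecutive port bits are checked.
 */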
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

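/*
 * Record the exit code and exit_info_1 in the VMCB and unwind to the CPU
 * loop with EXCP_VMEXIT; the actual world switch back to the host happens
 * in do_vmexit() when that exception is handled.
 */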
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

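/*
 * Complete the #VMEXIT: write the guest state back into the VMCB, then
 * restore the host state that helper_vmrun() stashed in the host save
 * area, and latch any pending injected event into EXITINTINFO.
 */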
void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;
    tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);

    /* Save the VM state in the vmcb */
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
             env->int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->int_ctl = 0;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    env->hflags2 &= ~HF2_VGIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}