xref: /openbmc/qemu/target/i386/tcg/sysemu/svm_helper.c (revision b14df228)
/*
 *  x86 SVM helpers (sysemu only)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */

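/*
 * Note on the segment helpers below: the VMCB stores segment attributes
 * in a packed 12-bit form (access-rights byte in bits 0-7, the
 * G/D/L/AVL nibble in bits 8-11), while QEMU's SegmentCache keeps the
 * same fields at flag bits 8-15 and 20-23, hence the shifting when
 * saving and loading.
 */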
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

/*
 * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
 * addresses in the segment registers that have been loaded.
 */
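/*
 * Illustrative example: with a 48-bit guest virtual address width,
 * shift_amt is 16, so a base of 0x0000800000000000 is sign-extended to
 * 0xffff800000000000, while 0x00007fffffffffff is left unchanged.
 */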
static inline void svm_canonicalization(CPUX86State *env,
                                        target_ulong *seg_base)
{
    uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
    *seg_base = ((((int64_t) *seg_base) << shift_amt) >> shift_amt);
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
    svm_canonicalization(env, &sc->base);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

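/*
 * Consistency checks applied by VMRUN to the guest EFER/CR/CS state
 * loaded from the VMCB; any hit below makes the VMRUN fail with
 * SVM_EXIT_ERR (VMEXIT_INVALID).
 */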
static inline bool is_efer_invalid_state(CPUX86State *env)
{
    if (!(env->efer & MSR_EFER_SVME)) {
        return true;
    }

    if (env->efer & MSR_EFER_RESERVED) {
        return true;
    }

    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[4] & CR4_PAE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[0] & CR0_PE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && (env->cr[4] & CR4_PAE_MASK)
                                && (env->segs[R_CS].flags & DESC_L_MASK)
                                && (env->segs[R_CS].flags & DESC_B_MASK)) {
        return true;
    }

    return false;
}

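/*
 * Virtual GIF is only honoured while running a nested guest, and only
 * if the vCPU advertises the VGIF feature and the outer hypervisor set
 * V_GIF_ENABLED in the VMCB int_ctl field.
 */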
static inline bool virtual_gif_enabled(CPUX86State *env)
{
    if (likely(env->hflags & HF_GUEST_MASK)) {
        return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
                    && (env->int_ctl & V_GIF_ENABLED_MASK);
    }
    return false;
}

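/*
 * Virtual VMLOAD/VMSAVE: when enabled, the nested guest's VMLOAD and
 * VMSAVE operate on a guest-physical VMCB address translated through
 * the nested page tables.  Without nested paging, or outside long mode,
 * the instruction is forced to exit instead.
 */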
static inline bool virtual_vm_load_save_enabled(CPUX86State *env,
                                                uint32_t exit_code,
                                                uintptr_t retaddr)
{
    uint64_t lbr_ctl;

    if (likely(env->hflags & HF_GUEST_MASK)) {
        if (likely(!(env->hflags2 & HF2_NPT_MASK))
            || !(env->efer & MSR_EFER_LMA)) {
            cpu_vmexit(env, exit_code, 0, retaddr);
        }

        lbr_ctl = x86_ldl_phys(env_cpu(env),
                               env->vm_vmcb + offsetof(struct vmcb,
                                                       control.lbr_ctl));
        return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
                && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
    }

    return false;
}

static inline bool virtual_gif_set(CPUX86State *env)
{
    return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
}

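/*
 * VMRUN: save the host state that SVM defines as host-saved into the
 * host save area (vm_hsave), cache the intercept bitmaps, load the
 * guest state from the VMCB whose physical address is in rAX, run the
 * consistency checks, and finally inject any event pending in the VMCB
 * EVENTINJ field.
 */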
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t asid;
    uint64_t new_cr0;
    uint64_t new_cr3;
    uint64_t new_cr4;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.asid));

    uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
    uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));

    if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    env->nested_pg_mode = 0;

    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
    if ((env->efer & MSR_EFER_LMA) &&
            (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
    if (new_cr4 & cr4_reserved_bits(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, new_cr4);
    cpu_x86_update_cr3(env, new_cr3);
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    env->int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (env->int_ctl & V_INTR_MASKING_MASK) {
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);
    svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.idtr),
                       &env->idt);
    svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.gdtr),
                       &env->gdt);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

#ifdef TARGET_X86_64
    if (env->dr[6] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (env->dr[7] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
#endif

    if (is_efer_invalid_state(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (ctl_has_irq(env)) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    if (virtual_gif_set(env)) {
        env->hflags2 |= HF2_VGIF_MASK;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            if (vector == EXCP02_NMI || vector >= 31) {
                cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            }
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        default:
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

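/*
 * VMMCALL is only meaningful when the hypervisor intercepts it; if it
 * is not intercepted, the instruction raises #UD.
 */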
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

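/*
 * VMLOAD reads the "hidden" state that VMRUN does not load -- FS, GS,
 * TR, LDTR and the KernelGsBase, STAR/LSTAR/CSTAR/SFMASK and SYSENTER
 * MSRs -- from the VMCB whose physical address is in rAX.
 */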
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    int prot;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
        addr = get_hphys(cs, addr, MMU_DATA_LOAD, &prot);
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                          save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
    svm_canonicalization(env, &env->kernelgsbase);
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

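/*
 * VMSAVE is the mirror image of VMLOAD: it stores the same set of
 * segment registers and MSRs into the VMCB at the address in rAX.
 */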
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    int prot;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
        addr = get_hphys(cs, addr, MMU_DATA_STORE, &prot);
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

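/*
 * STGI/CLGI set and clear the global interrupt flag.  When virtual GIF
 * is enabled for the nested guest, they operate on V_GIF in int_ctl
 * instead of on the real GIF.
 */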
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl |= V_GIF_MASK;
        env->hflags2 |= HF2_VGIF_MASK;
    } else {
        env->hflags2 |= HF2_GIF_MASK;
    }
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl &= ~V_GIF_MASK;
        env->hflags2 &= ~HF2_VGIF_MASK;
    } else {
        env->hflags2 &= ~HF2_GIF_MASK;
    }
}

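/*
 * Map an SVM exit code back to the corresponding bit in the cached
 * intercept bitmaps (CR/DR accesses, exceptions, or the generic
 * intercept word loaded at VMRUN time).
 */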
bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}

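/*
 * The MSR permission bitmap uses two bits per MSR (read and write) and
 * is split into 2K chunks covering MSRs 0000_0000h-0000_1FFFh,
 * C000_0000h-C000_1FFFh and C001_0000h-C001_1FFFh; the switch below
 * computes the byte (t1) and bit (t0) offsets, and 'param' selects the
 * read (0) or write (1) bit.
 */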
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
        uint32_t t0, t1;

        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

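/*
 * The I/O permission bitmap has one bit per port; a multi-byte access
 * is intercepted if any of the bits covering its bytes is set, which
 * is what the size-dependent mask below checks.
 */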
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

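/*
 * cpu_vmexit() only records the exit code and exit_info_1 in the VMCB
 * and unwinds back to the cpu loop with EXCP_VMEXIT; the actual world
 * switch back to the host is done later by do_vmexit().
 */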
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

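/*
 * Complete the #VMEXIT: save the guest state into the VMCB, then reload
 * the host state from the host save area and drop back out of guest
 * mode.
 */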
void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->int_ctl = 0;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    env->hflags2 &= ~HF2_VGIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}
865