/*
 *  x86 SVM helpers (sysemu only)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */

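/*
 * Store a segment register into a vmcb_seg at guest-physical address @addr.
 * The VMCB "attrib" field packs the descriptor attribute bits: bits 0-7
 * hold type/S/DPL/P and bits 8-11 hold AVL/L/DB/G, which is what the
 * shifts below extract from the cached flags word.
 */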
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

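/*
 * Inverse of svm_save_seg(): read a vmcb_seg from guest-physical memory
 * and unpack its attrib field back into the segment cache flags layout.
 */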
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

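/*
 * A virtual interrupt can be taken if V_IRQ is set and its priority
 * (V_INTR_PRIO) is greater than or equal to the virtual TPR.
 */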
static inline bool ctl_has_irq(uint32_t int_ctl)
{
    uint32_t int_prio;
    uint32_t tpr;

    int_prio = (int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
    tpr = int_ctl & V_TPR_MASK;
    return (int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
}

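/*
 * Consistency checks that VMRUN applies to the EFER-related guest state
 * (SVME set, no reserved bits, legal long mode/paging/CS combinations).
 * An invalid state makes VMRUN fail with SVM_EXIT_ERR.
 */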
static inline bool is_efer_invalid_state(CPUX86State *env)
{
    if (!(env->efer & MSR_EFER_SVME)) {
        return true;
    }

    if (env->efer & MSR_EFER_RESERVED) {
        return true;
    }

    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[4] & CR4_PAE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[0] & CR0_PE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && (env->cr[4] & CR4_PAE_MASK)
                                && (env->segs[R_CS].flags & DESC_L_MASK)
                                && (env->segs[R_CS].flags & DESC_B_MASK)) {
        return true;
    }

    return false;
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;
    uint32_t asid;
    uint64_t new_cr0;
    uint64_t new_cr3;
    uint64_t new_cr4;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.asid));

    uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
    uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));

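    /*
     * The MSR and I/O permission maps must fit below the guest-physical
     * address limit, otherwise VMRUN fails with SVM_EXIT_ERR.
     */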
    if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    env->nested_pg_mode = 0;

    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
    if ((env->efer & MSR_EFER_LMA) &&
            (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
    if (new_cr4 & cr4_reserved_bits(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, new_cr4);
    cpu_x86_update_cr3(env, new_cr3);
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

#ifdef TARGET_X86_64
    if (env->dr[6] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (env->dr[7] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
#endif

    if (is_efer_invalid_state(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (ctl_has_irq(int_ctl)) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            if (vector == EXCP02_NMI || vector >= 31) {
                cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            }
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        default:
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

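/*
 * VMLOAD: load from the VMCB at rAX the guest state that VMRUN does not
 * handle: FS, GS, TR, LDTR and the syscall/sysenter MSRs.
 */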
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                          save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

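/*
 * VMSAVE: mirror of VMLOAD; store the same set of registers back into
 * the VMCB at rAX.
 */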
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

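/*
 * Return true if the given exit code is intercepted, using the intercept
 * bitmaps that were cached in CPUX86State at VMRUN time.
 */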
bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
        uint32_t t0, t1;

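        /*
         * The MSRPM holds two bits per MSR (read in the even bit, write in
         * the odd bit) and covers three 8K MSR ranges, each taking 2K bytes
         * of the bitmap.  t1 is the byte offset into the bitmap and t0 the
         * bit position of the read bit within that byte; @param (0 for
         * reads, 1 for writes) then selects the read or write bit.
         */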
        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
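        /*
         * One IOPM bit per I/O port: an access of N bytes (the size is
         * encoded in bits 4-6 of @param) tests N consecutive bits starting
         * at the port number.
         */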
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

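    /*
     * Reflect the current virtual TPR and any still-pending virtual
     * interrupt back into the VMCB's V_TPR and V_IRQ fields.
     */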
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}