/*
 *  x86 SVM helpers (sysemu only)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */

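/*
 * The VMCB keeps segment attributes in a packed 12-bit "attrib" field,
 * while QEMU's SegmentCache.flags keeps them at the positions they occupy
 * in the high word of a descriptor (bits 8..15 and 20..23).  The helpers
 * below convert between the two layouts when saving to or loading from a
 * vmcb_seg in guest physical memory.
 */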
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

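/*
 * A virtual interrupt is deliverable when V_IRQ is set in the VMCB's
 * int_ctl field and the requested priority (V_INTR_PRIO) is at least the
 * virtual TPR (V_TPR).
 */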
static inline bool ctl_has_irq(uint32_t int_ctl)
{
    uint32_t int_prio;
    uint32_t tpr;

    int_prio = (int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
    tpr = int_ctl & V_TPR_MASK;
    return (int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
}

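/*
 * Subset of the illegal-state checks that VMRUN performs on the guest
 * EFER/CR0/CR4/CS combination (see the consistency checks listed for
 * VMRUN in the AMD APM vol. 2); any hit makes VMRUN fail with
 * SVM_EXIT_ERR.
 */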
static inline bool is_efer_invalid_state(CPUX86State *env)
{
    if (!(env->efer & MSR_EFER_SVME)) {
        return true;
    }

    if (env->efer & MSR_EFER_RESERVED) {
        return true;
    }

    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[4] & CR4_PAE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[0] & CR0_PE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && (env->cr[4] & CR4_PAE_MASK)
                                && (env->segs[R_CS].flags & DESC_L_MASK)
                                && (env->segs[R_CS].flags & DESC_B_MASK)) {
        return true;
    }

    return false;
}

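/*
 * VMRUN: save the host state to the hsave page, load the intercept
 * settings and guest state from the VMCB pointed to by rAX, run the
 * consistency checks required by the architecture, and finally inject
 * a pending event from EVENTINJ if one is valid.
 */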
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;
    uint32_t asid;
    uint64_t new_cr0;
    uint64_t new_cr3;
    uint64_t new_cr4;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.asid));

    uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
    uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));

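    /*
     * The MSR and I/O permission maps must fit entirely below the top of
     * the implemented physical address space; otherwise VMRUN fails.
     */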
    if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    env->nested_pg_mode = 0;

    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

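    /*
     * With nested paging enabled, remember the nested CR3 and the guest
     * paging mode so the MMU can apply the second-level translation.
     */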
    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
    if ((env->efer & MSR_EFER_LMA) &&
            (new_cr3 & ((~0UL) << cpu->phys_bits))) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
    if (new_cr4 & cr4_reserved_bits(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, new_cr4);
    cpu_x86_update_cr3(env, new_cr3);
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
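    /*
     * With V_INTR_MASKING set, the guest's IF flag only gates virtual
     * interrupts: physical interrupts stay under control of the host's IF,
     * which is remembered in HF2_HIF while the guest runs, and the virtual
     * TPR is tracked in v_tpr.
     */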
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

#ifdef TARGET_X86_64
    if (env->dr[6] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (env->dr[7] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
#endif

    if (is_efer_invalid_state(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

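    /* Entering the guest sets GIF, so interrupts can be taken again. */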
    env->hflags2 |= HF2_GIF_MASK;

    if (ctl_has_irq(int_ctl)) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
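    /*
     * EVENTINJ layout: bits 0..7 hold the vector, bits 8..10 the event
     * type, bit 11 says whether the error code in event_inj_err is valid,
     * and bit 31 marks the whole field as valid.
     */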
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

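/*
 * VMLOAD and VMSAVE transfer the guest state that VMRUN/#VMEXIT do not
 * touch: the hidden parts of FS, GS, TR and LDTR plus KernelGSBase,
 * STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs.
 */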
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                          save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

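/*
 * Look up an exit code in the intercept state cached at VMRUN time:
 * CR and DR accesses use the 16-bit read/write vectors, exceptions the
 * 32-bit exception bitmap, and every other exit code indexes the 64-bit
 * intercept word relative to SVM_EXIT_INTR.
 */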
bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}

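/*
 * Check whether an intercepted event should cause a #VMEXIT.  For MSR
 * intercepts the MSR permission map is consulted: it holds two bits per
 * MSR (read, then write) in three 2K regions covering MSRs 0-0x1fff,
 * 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff; t1 is the byte offset
 * into the map, t0 the bit offset within that byte, and param selects
 * the read (0) or write (1) bit.
 */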
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
        uint32_t t0, t1;

        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

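/*
 * For intercepted IN/OUT, the I/O permission map has one bit per port.
 * param carries the IOIO exit information, so bits 4..6 give the access
 * size in bytes; mask covers that many consecutive port bits, and the
 * 16-bit load lets the check span a byte boundary.  On a hit, exit_info_2
 * is set to the next rIP before taking the #VMEXIT.
 */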
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

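/*
 * Record the exit code and exit_info_1 in the VMCB and leave the guest by
 * raising EXCP_VMEXIT; the host state itself is reloaded later, in
 * do_vmexit().
 */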
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

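/*
 * Complete a #VMEXIT: save the guest state back into the VMCB, drop the
 * intercept settings and the pending virtual interrupt, then reload the
 * host state from the hsave page and report the injected event (if any)
 * in exit_int_info.
 */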
void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}