// This software is licensed under the terms of the GNU General Public
// License version 2, as published by the Free Software Foundation, and
// may be copied, distributed, and modified under those terms.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
#include "qemu/osdep.h"
#include "panic.h"
#include "qemu/error-report.h"

#include "sysemu/hvf.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86.h"
#include "x86_descr.h"
#include "x86_mmu.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>

#include "hw/i386/apic_internal.h"
#include "qemu/main-loop.h"
#include "qemu/accel.h"
#include "target/i386/cpu.h"

// TODO: taskswitch handling
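/*
 * Dump the current guest register state into an in-memory 32-bit TSS
 * image.  Only the dynamic fields (EIP, EFLAGS, the general purpose
 * registers and the segment selectors) are filled in; CR3 and the LDT
 * selector are static fields of a task and are left untouched.
 */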
static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    /* CR3 and ldt selector are not saved intentionally */
    tss->eip = (uint32_t)env->eip;
    tss->eflags = (uint32_t)env->eflags;
    tss->eax = EAX(env);
    tss->ecx = ECX(env);
    tss->edx = EDX(env);
    tss->ebx = EBX(env);
    tss->esp = ESP(env);
    tss->ebp = EBP(env);
    tss->esi = ESI(env);
    tss->edi = EDI(env);

    tss->es = vmx_read_segment_selector(cpu, R_ES).sel;
    tss->cs = vmx_read_segment_selector(cpu, R_CS).sel;
    tss->ss = vmx_read_segment_selector(cpu, R_SS).sel;
    tss->ds = vmx_read_segment_selector(cpu, R_DS).sel;
    tss->fs = vmx_read_segment_selector(cpu, R_FS).sel;
    tss->gs = vmx_read_segment_selector(cpu, R_GS).sel;
}

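/*
 * Load guest state from an in-memory 32-bit TSS image: CR3 goes straight
 * into the VMCS, EIP/EFLAGS and the general purpose registers into the
 * emulator state, and the segment registers (including LDTR) are written
 * back as selectors only.  EFLAGS bit 1 is architecturally reserved and
 * always reads as 1, so it is forced on here.
 */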
static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    wvmcs(cpu->accel->fd, VMCS_GUEST_CR3, tss->cr3);

    env->eip = tss->eip;
    env->eflags = tss->eflags | 2;

    /* General purpose registers */
    RAX(env) = tss->eax;
    RCX(env) = tss->ecx;
    RDX(env) = tss->edx;
    RBX(env) = tss->ebx;
    RSP(env) = tss->esp;
    RBP(env) = tss->ebp;
    RSI(env) = tss->esi;
    RDI(env) = tss->edi;

    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS);
}

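/*
 * Memory side of a 32-bit task switch: save the outgoing task's dynamic
 * state into its TSS, read the incoming TSS, link the old TSS selector
 * into the new TSS's "previous task link" field when the switch nests,
 * and finally load the incoming state.
 */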
static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,
                          uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)
{
    struct x86_tss_segment32 tss_seg;
    uint32_t new_tss_base = x86_segment_base(new_desc);
    uint32_t eip_offset = offsetof(struct x86_tss_segment32, eip);
    uint32_t ldt_sel_offset = offsetof(struct x86_tss_segment32, ldt);

    vmx_read_mem(cpu, &tss_seg, old_tss_base, sizeof(tss_seg));
    save_state_to_tss32(cpu, &tss_seg);

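    /*
     * Only the dynamic fields, i.e. the range from EIP up to (but not
     * including) the LDT selector, are written back to the outgoing TSS;
     * the static fields keep whatever the guest put there.
     */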
    vmx_write_mem(cpu, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset);
    vmx_read_mem(cpu, &tss_seg, new_tss_base, sizeof(tss_seg));

    if (old_tss_sel.sel != 0xffff) {
        tss_seg.prev_tss = old_tss_sel.sel;

        vmx_write_mem(cpu, new_tss_base, &tss_seg.prev_tss, sizeof(tss_seg.prev_tss));
    }
    load_state_from_tss32(cpu, &tss_seg);
    return 0;
}

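/*
 * Entry point for the "task switch" VM exit.  'reason' encodes how the
 * switch was triggered (CALL, IRET, JMP or IDT gate); gate_valid, gate
 * and gate_type describe the interrupting event when the switch came
 * through the IDT.
 */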
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
{
    uint64_t rip = rreg(cpu->accel->fd, HV_X86_RIP);
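    /*
     * Only task switches raised through a valid IDT gate by a hardware
     * exception, external interrupt or NMI are emulated here; for any
     * other cause we simply advance RIP past the instruction and return.
     */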
    if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
                        gate_type != VMCS_INTR_T_HWINTR &&
                        gate_type != VMCS_INTR_T_NMI)) {
        int ins_len = rvmcs(cpu->accel->fd, VMCS_EXIT_INSTRUCTION_LENGTH);
        macvm_set_rip(cpu, rip + ins_len);
        return;
    }

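    /* Pull the current vCPU register state from HVF into env before emulating. */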
    load_regs(cpu);

    struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
    int ret;
    x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
    uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
    uint32_t desc_limit;
    struct x86_call_gate task_gate_desc;
    struct vmx_segment vmx_seg;

    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel);
    x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);

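    /*
     * For a switch through an IDT task gate, compare the gate's DPL
     * against the new TSS selector RPL and the current CPL; injecting
     * #GP on failure is still a TODO.
     */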
    if (reason == TSR_IDT_GATE && gate_valid) {
        int dpl;

        ret = x86_read_call_gate(cpu, &task_gate_desc, gate);

        dpl = task_gate_desc.dpl;
        x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
        if (tss_sel.rpl > dpl || cs.rpl > dpl)
            ;//DPRINTF("emulate_gp");
    }

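    /*
     * The incoming TSS descriptor must be present and large enough: at
     * least 0x67 bytes for a 32-bit TSS (type bit 3 set), 0x2b bytes for
     * a 16-bit one.  A real CPU would raise #TS; we panic instead.
     */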
    desc_limit = x86_segment_limit(&next_tss_desc);
    if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) {
        VM_PANIC("emulate_ts");
    }

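    /*
     * Busy-bit and nesting bookkeeping: IRET and JMP leave the outgoing
     * task, so its descriptor loses the busy bit; IRET additionally
     * clears EFLAGS.NT.  Only CALL and IDT-gate switches nest, so for
     * the other reasons the old TSS selector is marked invalid (0xffff)
     * to suppress the back link.  Any switch other than IRET marks the
     * incoming TSS busy.
     */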
    if (reason == TSR_IRET || reason == TSR_JMP) {
        curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
        x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
    }

    if (reason == TSR_IRET)
        env->eflags &= ~NT_MASK;

    if (reason != TSR_CALL && reason != TSR_IDT_GATE)
        old_tss_sel.sel = 0xffff;

    if (reason != TSR_IRET) {
        next_tss_desc.type |= (1 << 1); /* set busy flag */
        x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);
    }

    if (next_tss_desc.type & 8)
        ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
    else
        //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
        VM_PANIC("task_switch_16");

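    /*
     * A task switch sets CR0.TS so that the next FPU/SSE instruction in
     * the new task raises #NM, and TR is reloaded with the descriptor of
     * the incoming TSS.
     */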
    macvm_set_cr0(cpu->accel->fd, rvmcs(cpu->accel->fd, VMCS_GUEST_CR0) |
                                CR0_TS_MASK);
    x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
    vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);

    store_regs(cpu);

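    /* CR3 changed with the task, so flush the guest TLB. */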
    hv_vcpu_invalidate_tlb(cpu->accel->fd);
}