/*
 * x86 segmentation related helpers: (sysemu-only code)
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"
#include "../seg_helper.h"

void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    /* SYSCALL loads CS from MSR_STAR[47:32] and SS from that value + 8. */
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        /* RCX = return RIP, R11 = RFLAGS with RF cleared */
        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~(env->fmask | RF_MASK);
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* Entry point comes from LSTAR (64-bit caller) or CSTAR (compat). */
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* Legacy mode: the entry point is the low 32 bits of MSR_STAR. */
        env->eip = (uint32_t)env->star;
    }
}

/*
 * Record the event being delivered to a nested (SVM) guest in the
 * VMCB EVENTINJ field, unless a valid event is already pending there.
 */
void handle_even_inj(CPUX86State *env, int intno, int is_int,
                     int error_code, int is_hw, int rm)
{
    CPUState *cs = env_cpu(env);
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}

void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->exception_index == EXCP_VMEXIT) {
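        /*
         * #VMEXIT from a nested guest: do_vmexit() saves the guest
         * state back into the VMCB and reloads the host state from
         * the host save area; no exception may still be pending here.
         */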
        assert(env->old_exception == -1);
        do_vmexit(env);
    } else {
        do_interrupt_all(cpu, cs->exception_index,
                         env->exception_is_int,
                         env->error_code,
                         env->exception_next_eip, 0);
        /* successfully delivered */
        env->old_exception = -1;
    }
}

bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int intno;

    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
    if (!interrupt_request) {
        return false;
    }

    /* Don't process multiple interrupt requests in a single call.
     * This is required to make icount-driven execution deterministic.
     */
    switch (interrupt_request) {
    case CPU_INTERRUPT_POLL:
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        break;
    case CPU_INTERRUPT_SIPI:
        do_cpu_sipi(cpu);
        break;
    case CPU_INTERRUPT_SMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
        do_smm_enter(cpu);
        break;
    case CPU_INTERRUPT_NMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
        env->hflags2 |= HF2_NMI_MASK;
        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
        break;
    case CPU_INTERRUPT_MCE:
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
        break;
    case CPU_INTERRUPT_HARD:
        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                   CPU_INTERRUPT_VIRQ);
        intno = cpu_get_pic_interrupt(env);
        qemu_log_mask(CPU_LOG_INT,
                      "Servicing hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        break;
    case CPU_INTERRUPT_VIRQ:
        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
        intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
        qemu_log_mask(CPU_LOG_INT,
                      "Servicing virtual hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
        env->int_ctl &= ~V_IRQ_MASK;
        break;
    }

    /* Ensure that no TB jump will be modified as the program flow was
       changed. */
    return true;
}

/* check if Port I/O is allowed in TSS */
void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
{
    uintptr_t retaddr = GETPC();
    uint32_t io_offset, val, mask;

    /* TSS must be a valid 32 bit one (type 9, present, and large enough
       to hold the I/O map base field at offset 0x66) */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}
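
/*
 * Worked example for the I/O permission bitmap lookup above (the port
 * and map base values are assumed purely for illustration): a one-byte
 * access to port 0x3f9 with an I/O map base of 0x68 reads the word at
 * tr.base + 0x68 + (0x3f9 >> 3) = tr.base + 0xe7, shifts it right by
 * (0x3f9 & 7) = 1 and masks it with (1 << 1) - 1 = 1; the access is
 * allowed only if the selected bit is clear.
 */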